Posted to commits@hive.apache.org by vg...@apache.org on 2016/07/02 01:35:40 UTC

hive git commit: HIVE-14038 miscellaneous acid improvements (Eugene Koifman, reviewed by Wei Zheng)

Repository: hive
Updated Branches:
  refs/heads/branch-2.1 e2da0e163 -> 21da55964


HIVE-14038 miscellaneous acid improvements (Eugene Koifman, reviewed by Wei Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/21da5596
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/21da5596
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/21da5596

Branch: refs/heads/branch-2.1
Commit: 21da55964f17abc2461a4baf2eb38afeb752e11c
Parents: e2da0e1
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Tue Jun 21 12:11:10 2016 -0700
Committer: Vaibhav Gumashta <vg...@hortonworks.com>
Committed: Fri Jul 1 18:31:25 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/conf/HiveConfUtil.java   |  50 ++++++++
 .../hive/hcatalog/templeton/AppConfig.java      |  41 +-----
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   7 ++
 .../txn/compactor/HouseKeeperServiceBase.java   |   2 +-
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |  34 ++++-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      | 126 ++++++++++++++++++-
 6 files changed, 212 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/21da5596/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
index 0d3b94c..073a978 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
@@ -18,8 +18,18 @@
 
 package org.apache.hadoop.hive.conf;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.Private;
 
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+
 /**
  * Hive Configuration utils
  */
@@ -35,4 +45,44 @@ public class HiveConfUtil {
   public static boolean isEmbeddedMetaStore(String msUri) {
     return (msUri == null) ? true : msUri.trim().isEmpty();
   }
+
+  /**
+   * Dumps all HiveConf for debugging.  Convenient for dumping state at process start-up and
+   * logging it, so that the values of all variables are known in later analysis.
+   */
+  public static StringBuilder dumpConfig(HiveConf conf) {
+    StringBuilder sb = new StringBuilder("START========\"HiveConf()\"========\n");
+    sb.append("hiveDefaultUrl=").append(conf.getHiveDefaultLocation()).append('\n');
+    sb.append("hiveSiteURL=").append(HiveConf.getHiveSiteLocation()).append('\n');
+    sb.append("hiveServer2SiteUrl=").append(HiveConf.getHiveServer2SiteLocation()).append('\n');
+    sb.append("hivemetastoreSiteUrl=").append(HiveConf.getMetastoreSiteLocation()).append('\n');
+    dumpConfig(conf, sb);
+    return sb.append("END========\"new HiveConf()\"========\n");
+  }
+  public static void dumpConfig(Configuration conf, StringBuilder sb) {
+    Iterator<Map.Entry<String, String>> configIter = conf.iterator();
+    List<Map.Entry<String, String>> configVals = new ArrayList<>();
+    while(configIter.hasNext()) {
+      configVals.add(configIter.next());
+    }
+    Collections.sort(configVals, new Comparator<Map.Entry<String, String>>() {
+      @Override
+      public int compare(Map.Entry<String, String> ent, Map.Entry<String, String> ent2) {
+        return ent.getKey().compareTo(ent2.getKey());
+      }
+    });
+    for(Map.Entry<String, String> entry : configVals) {
+      //use get() to make sure variable substitution works
+      if(entry.getKey().toLowerCase().contains("path")) {
+        StringTokenizer st = new StringTokenizer(conf.get(entry.getKey()), File.pathSeparator);
+        sb.append(entry.getKey()).append("=\n");
+        while(st.hasMoreTokens()) {
+          sb.append("    ").append(st.nextToken()).append(File.pathSeparator).append('\n');
+        }
+      }
+      else {
+        sb.append(entry.getKey()).append('=').append(conf.get(entry.getKey())).append('\n');
+      }
+    }
+  }
 }
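
A minimal sketch of how the new helper might be invoked to capture the effective configuration
at service start-up; the example class and logger below are illustrative and not part of this
patch, only HiveConf and HiveConfUtil.dumpConfig come from Hive itself:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConfUtil;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ConfigDumpExample {
      private static final Logger LOG = LoggerFactory.getLogger(ConfigDumpExample.class);

      public static void main(String[] args) {
        // Build the effective HiveConf (hive-site.xml etc. from the classpath) and log
        // every resolved variable, sorted by key, with "path"-like values split per entry.
        HiveConf conf = new HiveConf();
        LOG.info(HiveConfUtil.dumpConfig(conf).toString());
      }
    }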

http://git-wip-us.apache.org/repos/asf/hive/blob/21da5596/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
index 10caf9b..dd1208b 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
@@ -24,17 +24,15 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.StringTokenizer;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hive.hcatalog.templeton.tool.JobState;
@@ -247,47 +245,14 @@ public class AppConfig extends Configuration {
   private String dumpEnvironent() {
     StringBuilder sb = TempletonUtils.dumpPropMap("========WebHCat System.getenv()========", System.getenv());
     sb.append("START========WebHCat AppConfig.iterator()========: \n");
-    dumpConfig(this, sb);
+    HiveConfUtil.dumpConfig(this, sb);
     sb.append("END========WebHCat AppConfig.iterator()========: \n");
 
     sb.append(TempletonUtils.dumpPropMap("========WebHCat System.getProperties()========", System.getProperties()));
 
-    sb.append("START========\"new HiveConf()\"========\n");
-    HiveConf c = new HiveConf();
-    sb.append("hiveDefaultUrl=").append(c.getHiveDefaultLocation()).append('\n');
-    sb.append("hiveSiteURL=").append(HiveConf.getHiveSiteLocation()).append('\n');
-    sb.append("hiveServer2SiteUrl=").append(HiveConf.getHiveServer2SiteLocation()).append('\n');
-    sb.append("hivemetastoreSiteUrl=").append(HiveConf.getMetastoreSiteLocation()).append('\n');
-    dumpConfig(c, sb);
-    sb.append("END========\"new HiveConf()\"========\n");
+    sb.append(HiveConfUtil.dumpConfig(new HiveConf()));
     return sb.toString();
   }
-  private static void dumpConfig(Configuration conf, StringBuilder sb) {
-    Iterator<Map.Entry<String, String>> configIter = conf.iterator();
-    List<Map.Entry<String, String>>configVals = new ArrayList<>();
-    while(configIter.hasNext()) {
-      configVals.add(configIter.next());
-    }
-    Collections.sort(configVals, new Comparator<Map.Entry<String, String>> () {
-      @Override
-      public int compare(Map.Entry<String, String> ent, Map.Entry<String, String> ent2) {
-        return ent.getKey().compareTo(ent2.getKey());
-      }
-    });
-    for(Map.Entry<String, String> entry : configVals) {
-      //use get() to make sure variable substitution works
-      if(entry.getKey().toLowerCase().contains("path")) {
-        StringTokenizer st = new StringTokenizer(conf.get(entry.getKey()), File.pathSeparator);
-        sb.append(entry.getKey()).append("=\n");
-        while(st.hasMoreTokens()) {
-          sb.append("    ").append(st.nextToken()).append(File.pathSeparator).append('\n');
-        }
-      }
-      else {
-        sb.append(entry.getKey()).append('=').append(conf.get(entry.getKey())).append('\n');
-      }
-    }
-  }
 
   public JobsListOrder getListJobsOrder() {
     String requestedOrder = get(TEMPLETON_JOBSLIST_ORDER);

http://git-wip-us.apache.org/repos/asf/hive/blob/21da5596/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 7a89a0c..119b08e 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -39,6 +39,7 @@ import org.apache.commons.pool.impl.GenericObjectPool;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StringableMap;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.StringUtils;
@@ -204,6 +205,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
    */
   private final static ConcurrentHashMap<String, Semaphore> derbyKey2Lock = new ConcurrentHashMap<>();
   private static final String hostname = ServerUtils.hostname();
+  private static volatile boolean dumpConfig = true;
 
   // Private methods should never catch SQLException and then throw MetaException.  The public
   // methods depend on SQLException coming back so they can detect and handle deadlocks.  Private
@@ -250,6 +252,11 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
     retryLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HMSHANDLERATTEMPTS);
     deadlockRetryInterval = retryInterval / 10;
     maxOpenTxns = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS);
+    if(dumpConfig) {
+      LOG.info(HiveConfUtil.dumpConfig(conf).toString());
+      //only do this once per JVM; useful for support
+      dumpConfig = false;
+    }
   }
 
   public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
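
The dumpConfig flag added above is a once-per-JVM guard: a volatile boolean cleared after the
first dump. A stripped-down sketch of the same pattern, with invented class and method names
(a rare duplicate dump under a thread race would be harmless here, since the flag only limits
log noise):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DumpOnce {
      private static final Logger LOG = LoggerFactory.getLogger(DumpOnce.class);
      // volatile so every thread sees the cleared flag; no lock is taken because an
      // occasional duplicate log line is an acceptable worst case.
      private static volatile boolean dumpConfig = true;

      void configure(String configSummary) {
        if (dumpConfig) {
          LOG.info(configSummary);   // log the full configuration once per JVM
          dumpConfig = false;
        }
      }
    }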

http://git-wip-us.apache.org/repos/asf/hive/blob/21da5596/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
index caab10d..0b7332c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
@@ -48,7 +48,7 @@ public abstract class HouseKeeperServiceBase implements HouseKeeperService {
       private final AtomicInteger threadCounter = new AtomicInteger();
       @Override
       public Thread newThread(Runnable r) {
-        return new Thread(r, this.getClass().getName() + "-" + threadCounter.getAndIncrement());
+        return new Thread(r, HouseKeeperServiceBase.this.getClass().getName() + "-" + threadCounter.getAndIncrement());
       }
     });
 

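The one-line change above affects the housekeeper thread names: inside the anonymous
ThreadFactory, this.getClass() is the anonymous factory class, while
HouseKeeperServiceBase.this.getClass() is the concrete housekeeper subclass, which is
presumably what the thread name was meant to carry. A self-contained illustration of the
difference, with invented class names:

    public class OuterThisDemo {
      static class Base {
        Runnable namer = new Runnable() {
          @Override
          public void run() {
            // Prints something like OuterThisDemo$Base$1 -- the anonymous class itself.
            System.out.println(this.getClass().getName());
            // Prints the runtime class of the enclosing instance, e.g. OuterThisDemo$Concrete.
            System.out.println(Base.this.getClass().getName());
          }
        };
      }
      static class Concrete extends Base {}

      public static void main(String[] args) {
        new Concrete().namer.run();
      }
    }
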
http://git-wip-us.apache.org/repos/asf/hive/blob/21da5596/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index e4cbd5f..388b7c5 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -21,8 +21,15 @@ import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService;
@@ -415,14 +422,31 @@ public class TestTxnCommands {
     hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 2, TimeUnit.MILLISECONDS);
     AcidHouseKeeperService houseKeeperService = new AcidHouseKeeperService();
     //this will abort the txn
-    houseKeeperService.start(hiveConf);
-    while(houseKeeperService.getIsAliveCounter() <= Integer.MIN_VALUE) {
-      Thread.sleep(100);//make sure it has run at least once
-    }
-    houseKeeperService.stop();
+    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
     //this should fail because txn aborted due to timeout
     CommandProcessorResponse cpr = runStatementOnDriverNegative("delete from " + Table.ACIDTBL + " where a = 5");
     Assert.assertTrue("Actual: " + cpr.getErrorMessage(), cpr.getErrorMessage().contains("Transaction manager has aborted the transaction txnid:1"));
+    
+    //now test that we don't timeout locks we should not
+    hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 10, TimeUnit.MINUTES);
+    runStatementOnDriver("start transaction");
+    runStatementOnDriver("select count(*) from " + Table.ACIDTBL + " where a = 17");
+    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+    ShowLocksResponse slr = txnHandler.showLocks(new ShowLocksRequest());
+    TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks().get(0));
+    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
+    slr = txnHandler.showLocks(new ShowLocksRequest());
+    TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks().get(0));
+    Assert.assertEquals("Unexpected lock count", 1, slr.getLocks().size());
+
+    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
+    slr = txnHandler.showLocks(new ShowLocksRequest());
+    TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks().get(0));
+    Assert.assertEquals("Unexpected lock count", 1, slr.getLocks().size());
+
+    runStatementOnDriver("rollback");
+    slr = txnHandler.showLocks(new ShowLocksRequest());
+    Assert.assertEquals("Unexpected lock count", 0, slr.getLocks().size());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/21da5596/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 19cde2f..210ebde 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -45,6 +45,7 @@ import org.junit.Test;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -630,8 +631,47 @@ public class TestDbTxnManager2 {
     relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
     txnMgr.getLockManager().releaseLocks(relLocks);
   }
+  /**
+   * Check to make sure we acquire proper locks for queries involving acid and non-acid tables
+   */
+  @Test
+  public void checkExpectedLocks2() throws Exception {
+    checkCmdOnDriver(driver.run("drop table if exists tab_acid"));
+    checkCmdOnDriver(driver.run("drop table if exists tab_not_acid"));
+    checkCmdOnDriver(driver.run("create table if not exists tab_acid (a int, b int) partitioned by (p string) " +
+      "clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')"));
+    checkCmdOnDriver(driver.run("create table if not exists tab_not_acid (na int, nb int) partitioned by (np string) " +
+      "clustered by (na) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='false')"));
+    checkCmdOnDriver(driver.run("insert into tab_acid partition(p) (a,b,p) values(1,2,'foo'),(3,4,'bar')"));
+    checkCmdOnDriver(driver.run("insert into tab_not_acid partition(np) (na,nb,np) values(1,2,'blah'),(3,4,'doh')"));
+    txnMgr.openTxn("T1");
+    checkCmdOnDriver(driver.compileAndRespond("select * from tab_acid inner join tab_not_acid on a = na"));
+    txnMgr.acquireLocks(driver.getPlan(), ctx, "T1");
+    List<ShowLocksResponseElement> locks = getLocks(txnMgr, true);
+    Assert.assertEquals("Unexpected lock count", 6, locks.size());
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", null, locks.get(0));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=bar", locks.get(1));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=foo", locks.get(2));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", null, locks.get(3));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=blah", locks.get(4));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=doh", locks.get(5));
+
+    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    txnMgr2.openTxn("T2");
+    checkCmdOnDriver(driver.compileAndRespond("insert into tab_not_acid partition(np='doh') values(5,6)"));
+    LockState ls = ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "T2", false);
+    locks = getLocks(txnMgr2, true);
+    Assert.assertEquals("Unexpected lock count", 7, locks.size());
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", null, locks.get(0));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=bar", locks.get(1));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=foo", locks.get(2));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", null, locks.get(3));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=blah", locks.get(4));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=doh", locks.get(5));
+    checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "tab_not_acid", "np=doh", locks.get(6));
+  }
 
-  private void checkLock(LockType expectedType, LockState expectedState, String expectedDb, String expectedTable, String expectedPartition, ShowLocksResponseElement actual) {
+  public static void checkLock(LockType expectedType, LockState expectedState, String expectedDb, String expectedTable, String expectedPartition, ShowLocksResponseElement actual) {
     Assert.assertEquals(actual.toString(), expectedType, actual.getType());
     Assert.assertEquals(actual.toString(), expectedState,actual.getState());
     Assert.assertEquals(actual.toString(), normalizeCase(expectedDb), normalizeCase(actual.getDbname()));
@@ -738,18 +778,90 @@ public class TestDbTxnManager2 {
   private void checkCmdOnDriver(CommandProcessorResponse cpr) {
     Assert.assertTrue(cpr.toString(), cpr.getResponseCode() == 0);
   }
-  private String normalizeCase(String s) {
+  private static String normalizeCase(String s) {
     return s == null ? null : s.toLowerCase();
   }
+
+  /**
+   * @deprecated use {@link #getLocks(boolean)}
+   */
   private List<ShowLocksResponseElement> getLocks() throws Exception {
-    return getLocks(this.txnMgr);
+    return getLocks(false);
   }
+  private List<ShowLocksResponseElement> getLocks(boolean sorted) throws Exception {
+    return getLocks(this.txnMgr, sorted);
+  }
+  /**
+   * @deprecated use {@link #getLocks(HiveTxnManager, boolean)}
+   */
   private List<ShowLocksResponseElement> getLocks(HiveTxnManager txnMgr) throws Exception {
+    return getLocks(txnMgr, false);
+  }
+
+  /**
+   * for some reason sort order of locks changes across different branches...
+   */
+  private List<ShowLocksResponseElement> getLocks(HiveTxnManager txnMgr, boolean sorted) throws Exception {
     ShowLocksResponse rsp = ((DbLockManager)txnMgr.getLockManager()).getLocks();
+    if(sorted) {
+      Collections.sort(rsp.getLocks(), new LockComparator());
+    }
     return rsp.getLocks();
   }
+  private static class LockComparator implements Comparator<ShowLocksResponseElement> {
+    @Override
+    public boolean equals(Object other) {
+      return other instanceof LockComparator;
+    }
+    //the exact sort order is not important, only that it is consistent
+    @Override
+    public int compare(ShowLocksResponseElement p1, ShowLocksResponseElement p2) {
+      if(p1 == null && p2 == null) {
+        return 0;
+      }
+      if(p1 == null) {
+        return -1;
+      }
+      if(p2 == null) {
+        return 1;
+      }
+      int v = 0;
+      if((v = compare(p1.getDbname(), p2.getDbname())) != 0) {
+        return v;
+      }
+      if((v = compare(p1.getTablename(), p2.getTablename())) != 0) {
+        return v;
+      }
+      if((v = compare(p1.getPartname(), p2.getPartname())) != 0) {
+        return v;
+      }
+      if((v = p1.getType().getValue() - p2.getType().getValue()) != 0) {
+        return v;
+      }
+      if((v = p1.getState().getValue() - p2.getState().getValue()) != 0) {
+        return v;
+      }
+      //we should never get here (given current code)
+      if(p1.getLockid() == p2.getLockid()) {
+        return (int)(p1.getLockIdInternal() - p2.getLockIdInternal());
+      }
+      return (int)(p1.getLockid() - p2.getLockid());
+    }
+    private static int compare(String s1, String s2) {
+      if(s1 == null && s2 == null) {
+        return 0;
+      }
+      if(s1 == null) {
+        return -1;
+      }
+      if(s2 == null) {
+        return 1;
+      }
+      return s1.compareTo(s2);
+    }
+  }
 
-  /**
+    /**
    * txns update same resource but do not overlap in time - no conflict
    */
   @Test
@@ -1332,8 +1444,14 @@ public class TestDbTxnManager2 {
     Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
       1, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=1 and ctc_table='tab1'"));
   }
+
+  /**
+   * ToDo: multi-insert into txn table and non-tx table should be prevented
+   */
   @Test
   public void testMultiInsert() throws Exception {
+    checkCmdOnDriver(driver.run("drop table if exists tab1"));
+    checkCmdOnDriver(driver.run("drop table if exists tab_not_acid"));
     CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " +
       "clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')");
     checkCmdOnDriver(cpr);