Posted to commits@hive.apache.org by dk...@apache.org on 2023/01/10 08:46:43 UTC

[hive] branch master updated: HIVE-11495: Add aborted reason to transaction information (Sourabh Badhya, reviewed by Denys Kuzmenko, Laszlo Vegh)

This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 3cba64f95da HIVE-11495: Add aborted reason to transaction information (Sourabh Badhya, reviewed by Denys Kuzmenko, Laszlo Vegh)
3cba64f95da is described below

commit 3cba64f95dace55c4422d9a4375eb266a7120175
Author: Sourabh Badhya <42...@users.noreply.github.com>
AuthorDate: Tue Jan 10 14:16:29 2023 +0530

    HIVE-11495: Add aborted reason to transaction information (Sourabh Badhya, reviewed by Denys Kuzmenko, Laszlo Vegh)
    
    Closes #3908
---
 .../metastore/SynchronizedMetaStoreClient.java     |   6 +-
 .../process/abort/AbortTransactionsOperation.java  |   3 +-
 .../hadoop/hive/ql/exec/repl/ReplDumpTask.java     |   7 +-
 .../hadoop/hive/ql/exec/repl/ReplLoadTask.java     |   3 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java       |   6 +-
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   |  45 +++++----
 .../hadoop/hive/ql/txn/compactor/Worker.java       |   6 +-
 .../hive/ql/util/HiveStrictManagedMigration.java   |  10 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp    |  47 +++++++++
 .../src/gen/thrift/gen-cpp/hive_metastore_types.h  |  29 +++++-
 .../hadoop/hive/metastore/api/AbortTxnRequest.java | 106 +++++++++++++++++++-
 .../hive/metastore/api/AbortTxnsRequest.java       | 108 ++++++++++++++++++++-
 .../thrift/gen-php/metastore/AbortTxnRequest.php   |  24 +++++
 .../thrift/gen-php/metastore/AbortTxnsRequest.php  |  24 +++++
 .../src/gen/thrift/gen-py/hive_metastore/ttypes.py |  28 +++++-
 .../src/gen/thrift/gen-rb/hive_metastore_types.rb  |   8 +-
 .../hadoop/hive/metastore/HiveMetaStoreClient.java |  10 ++
 .../hadoop/hive/metastore/IMetaStoreClient.java    |  20 ++++
 .../hadoop/hive/metastore/hive_metastore.proto     |   2 +
 .../src/main/thrift/hive_metastore.thrift          |   2 +
 .../org/apache/hadoop/hive/metastore/Msck.java     |   9 +-
 .../hadoop/hive/metastore/txn/TxnErrorMsg.java     |  81 ++++++++++++++++
 .../hadoop/hive/metastore/txn/TxnHandler.java      |  42 +++++---
 .../metastore/HiveMetaStoreClientPreCatalog.java   |  10 ++
 .../apache/hive/streaming/TransactionBatch.java    |  10 +-
 25 files changed, 586 insertions(+), 60 deletions(-)

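For readers skimming the diff below, this is, in rough terms, the calling pattern the change introduces: code paths that used to abort a transaction with only its id now build an AbortTxnRequest (or an AbortTxnsRequest for batches) and attach a TxnErrorMsg code, so the metastore can record why the transaction was aborted. The following is a minimal Java sketch of that pattern, assembled from the hunks below; the class and method names (AbortWithReasonExample, rollbackWithReason, abortWithReason) are illustrative only, while the IMetaStoreClient overloads that accept the request objects are the ones added by this patch.

    import java.util.List;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
    import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
    import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
    import org.apache.thrift.TException;

    // Hypothetical helper class, not part of the patch; it only demonstrates
    // the request-building pattern used throughout the diff below.
    public class AbortWithReasonExample {

      // Abort a single transaction, recording ABORT_ROLLBACK as the reason
      // (the code SynchronizedMetaStoreClient and DbTxnManager use below).
      static void rollbackWithReason(IMetaStoreClient client, long txnId) throws TException {
        AbortTxnRequest request = new AbortTxnRequest(txnId);
        request.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode());
        client.rollbackTxn(request);
      }

      // Abort a batch of transactions with one reason code, mirroring the new
      // Hive.abortTransactions(List<Long>, long errorCode) signature.
      static void abortWithReason(IMetaStoreClient client, List<Long> txnIds, TxnErrorMsg reason) throws TException {
        AbortTxnsRequest request = new AbortTxnsRequest(txnIds);
        request.setErrorCode(reason.getErrorCode());
        client.abortTxns(request);
      }
    }

The errorCode travels as a new optional i64 field on both Thrift request structs (field 4 on AbortTxnRequest, field 2 on AbortTxnsRequest), which is why most of the changed lines below are regenerated Thrift bindings.
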
diff --git a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
index d57f7c388c8..b7aa6fad9e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
@@ -22,6 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.CmRecycleResponse;
 import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest;
 import org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.thrift.TException;
 
 
@@ -62,7 +64,9 @@ public final class SynchronizedMetaStoreClient {
   }
 
   public synchronized void rollbackTxn(long txnid) throws TException {
-    client.rollbackTxn(txnid);
+    AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnid);
+    abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode());
+    client.rollbackTxn(abortTxnRequest);
   }
 
   public synchronized void heartbeat(long txnid, long lockid) throws TException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/AbortTransactionsOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/AbortTransactionsOperation.java
index 8a02ba367c1..8250ac7efe5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/AbortTransactionsOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/AbortTransactionsOperation.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.ddl.process.abort;
 
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -32,7 +33,7 @@ public class AbortTransactionsOperation extends DDLOperation<AbortTransactionsDe
 
   @Override
   public int execute() throws HiveException {
-    context.getDb().abortTransactions(desc.getTransactionIds());
+    context.getDb().abortTransactions(desc.getTransactionIds(), TxnErrorMsg.ABORT_QUERY.getErrorCode());
     return 0;
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 2996ab10ead..e7c329a5f25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.CatalogFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.ReplEventFilter;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
 import org.apache.hadoop.hive.metastore.utils.StringUtils;
@@ -775,7 +776,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     List<Long> txnsForDb = getOpenTxns(hiveTxnManager, hiveTxnManager.getValidTxns(excludedTxns), work.dbNameOrPattern);
     if (!txnsForDb.isEmpty()) {
       LOG.debug("Going to abort transactions: {} for database: {}.", txnsForDb, work.dbNameOrPattern);
-      hiveDb.abortTransactions(txnsForDb);
+      hiveDb.abortTransactions(txnsForDb, TxnErrorMsg.ABORT_FETCH_FAILOVER_METADATA.getErrorCode());
     }
     fmd.setAbortedTxns(txnsForDb);
     fmd.setCursorPoint(currentNotificationId(hiveDb));
@@ -786,7 +787,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     txnsForDb = getOpenTxns(hiveTxnManager, allValidTxns, work.dbNameOrPattern);
     if (!txnsForDb.isEmpty()) {
       LOG.debug("Going to abort transactions: {} for database: {}.", txnsForDb, work.dbNameOrPattern);
-      hiveDb.abortTransactions(txnsForDb);
+      hiveDb.abortTransactions(txnsForDb, TxnErrorMsg.ABORT_FETCH_FAILOVER_METADATA.getErrorCode());
       fmd.addToAbortedTxns(txnsForDb);
     }
     fmd.setFailoverEventId(currentNotificationId(hiveDb));
@@ -1585,7 +1586,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
       List<Long> openTxns = getOpenTxns(hiveTxnManager, validTxnList, work.dbNameOrPattern);
       if (!openTxns.isEmpty()) {
         //abort only write transactions for the db under replication if abort transactions is enabled.
-        hiveDb.abortTransactions(openTxns);
+        hiveDb.abortTransactions(openTxns, TxnErrorMsg.ABORT_WRITE_TXN_AFTER_TIMEOUT.getErrorCode());
         validTxnList = hiveTxnManager.getValidTxns(excludedTxns);
         openTxns = getOpenTxns(hiveTxnManager, validTxnList, work.dbNameOrPattern);
         if (!openTxns.isEmpty()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
index 7be785a97ee..e117e97bf1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ddl.database.alter.owner.AlterDatabaseSetOwnerDesc;
 import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;
@@ -878,7 +879,7 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
     if (!openTxns.isEmpty()) {
       LOG.info("Rolling back write txns:" + openTxns.toString() + " for the database: " + dbName);
       //abort only write transactions for the current database if abort transactions is enabled.
-      hiveDb.abortTransactions(openTxns);
+      hiveDb.abortTransactions(openTxns, TxnErrorMsg.ABORT_ONGOING_TXN_FOR_TARGET_DB.getErrorCode());
       validTxnList = hiveTxnManager.getValidTxns(excludedTxns);
       openTxns = ReplUtils.getOpenTxns(hiveTxnManager, validTxnList, dbName);
       if (!openTxns.isEmpty()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index de8b3a45df4..8fc77b09f0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.LockComponentBuilder;
 import org.apache.hadoop.hive.metastore.LockRequestBuilder;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
 import org.apache.hadoop.hive.metastore.api.LockComponent;
 import org.apache.hadoop.hive.metastore.api.LockResponse;
 import org.apache.hadoop.hive.metastore.api.LockState;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.TxnType;
 import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
@@ -593,7 +595,9 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
       if (replPolicy != null) {
         getMS().replRollbackTxn(txnId, replPolicy, TxnType.DEFAULT);
       } else {
-        getMS().rollbackTxn(txnId);
+        AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId);
+        abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode());
+        getMS().rollbackTxn(abortTxnRequest);
       }
     } catch (NoSuchTxnException e) {
       LOG.error("Metastore could not find " + JavaUtils.txnIdToString(txnId));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index d9301c3ea4b..4acbb580b3a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -105,23 +105,7 @@ import org.apache.hadoop.hive.common.log.InPlaceUpdate;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.CompactionRequest;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
-import org.apache.hadoop.hive.metastore.api.GetTableRequest;
-import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.ql.io.HdfsUtils;
-import org.apache.hadoop.hive.metastore.HiveMetaException;
-import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.PartitionDropOptions;
-import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
-import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AllTableConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
@@ -130,6 +114,7 @@ import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionResponse;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -143,6 +128,7 @@ import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
 import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest;
 import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse;
 import org.apache.hadoop.hive.metastore.api.GetPartitionRequest;
@@ -151,6 +137,8 @@ import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;
 import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
+import org.apache.hadoop.hive.metastore.api.GetTableRequest;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.HiveObjectType;
@@ -159,6 +147,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.Materialization;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
@@ -177,9 +166,11 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMMapping;
 import org.apache.hadoop.hive.metastore.api.WMNullablePool;
@@ -190,11 +181,21 @@ import org.apache.hadoop.hive.metastore.api.WMTrigger;
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest;
 import org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.io.HdfsUtils;
+import org.apache.hadoop.hive.metastore.HiveMetaException;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.PartitionDropOptions;
+import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
+import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.database.drop.DropDatabaseDesc;
 import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator;
@@ -5940,9 +5941,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
   }
 
-  public void abortTransactions(List<Long> txnids) throws HiveException {
+  public void abortTransactions(List<Long> txnids, long errorCode) throws HiveException {
+    AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(txnids);
+    abortTxnsRequest.setErrorCode(errorCode);
     try {
-      getMSC().abortTxns(txnids);
+      getMSC().abortTxns(abortTxnsRequest);
     } catch (Exception e) {
       LOG.error("Failed abortTransactions", e);
       throw new HiveException(e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index 7fa1cfca97b..74aa7a7be09 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
 import org.apache.hadoop.hive.metastore.MetaStoreThread;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.FindNextCompactRequest;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.TxnType;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.metrics.AcidMetricService;
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.metastore.txn.TxnStatus;
 import org.apache.hadoop.hive.ql.io.AcidDirectory;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -653,7 +655,9 @@ public class Worker extends RemoteCompactorThread implements MetaStoreThread {
      */
     private void abort() throws TException {
       if (status == TxnStatus.OPEN) {
-        msc.abortTxns(Collections.singletonList(txnId));
+        AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId);
+        abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_COMPACTION_TXN.getErrorCode());
+        msc.rollbackTxn(abortTxnRequest);
         status = TxnStatus.ABORTED;
       }
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
index 50220751d0c..1dd9d8bf9db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -59,6 +60,7 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
 import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.HiveStrictManagedUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -1450,7 +1452,9 @@ public class HiveStrictManagedMigration {
           result = new TxnCtx(writeId, validWriteIds, txnId);
         } finally {
           if (result == null) {
-            msc.abortTxns(Lists.newArrayList(txnId));
+            AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId);
+            abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode());
+            msc.rollbackTxn(abortTxnRequest);
           }
         }
         return result;
@@ -1466,7 +1470,9 @@ public class HiveStrictManagedMigration {
         if (isOk) {
           msc.commitTxn(txnCtx.txnId);
         } else {
-          msc.abortTxns(Lists.newArrayList(txnCtx.txnId));
+          AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnCtx.txnId);
+          abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode());
+          msc.rollbackTxn(abortTxnRequest);
         }
       } catch (TException ex) {
         throw new HiveException(ex);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index a482833ebec..347ab314dd4 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -21896,6 +21896,11 @@ void AbortTxnRequest::__set_txn_type(const TxnType::type val) {
   this->txn_type = val;
 __isset.txn_type = true;
 }
+
+void AbortTxnRequest::__set_errorCode(const int64_t val) {
+  this->errorCode = val;
+__isset.errorCode = true;
+}
 std::ostream& operator<<(std::ostream& out, const AbortTxnRequest& obj)
 {
   obj.printTo(out);
@@ -21951,6 +21956,14 @@ uint32_t AbortTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->errorCode);
+          this->__isset.errorCode = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -21984,6 +21997,11 @@ uint32_t AbortTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) co
     xfer += oprot->writeI32(static_cast<int32_t>(this->txn_type));
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.errorCode) {
+    xfer += oprot->writeFieldBegin("errorCode", ::apache::thrift::protocol::T_I64, 4);
+    xfer += oprot->writeI64(this->errorCode);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -21994,6 +22012,7 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) {
   swap(a.txnid, b.txnid);
   swap(a.replPolicy, b.replPolicy);
   swap(a.txn_type, b.txn_type);
+  swap(a.errorCode, b.errorCode);
   swap(a.__isset, b.__isset);
 }
 
@@ -22001,12 +22020,14 @@ AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other818) {
   txnid = other818.txnid;
   replPolicy = other818.replPolicy;
   txn_type = other818.txn_type;
+  errorCode = other818.errorCode;
   __isset = other818.__isset;
 }
 AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other819) {
   txnid = other819.txnid;
   replPolicy = other819.replPolicy;
   txn_type = other819.txn_type;
+  errorCode = other819.errorCode;
   __isset = other819.__isset;
   return *this;
 }
@@ -22016,6 +22037,7 @@ void AbortTxnRequest::printTo(std::ostream& out) const {
   out << "txnid=" << to_string(txnid);
   out << ", " << "replPolicy="; (__isset.replPolicy ? (out << to_string(replPolicy)) : (out << "<null>"));
   out << ", " << "txn_type="; (__isset.txn_type ? (out << to_string(txn_type)) : (out << "<null>"));
+  out << ", " << "errorCode="; (__isset.errorCode ? (out << to_string(errorCode)) : (out << "<null>"));
   out << ")";
 }
 
@@ -22027,6 +22049,11 @@ AbortTxnsRequest::~AbortTxnsRequest() noexcept {
 void AbortTxnsRequest::__set_txn_ids(const std::vector<int64_t> & val) {
   this->txn_ids = val;
 }
+
+void AbortTxnsRequest::__set_errorCode(const int64_t val) {
+  this->errorCode = val;
+__isset.errorCode = true;
+}
 std::ostream& operator<<(std::ostream& out, const AbortTxnsRequest& obj)
 {
   obj.printTo(out);
@@ -22076,6 +22103,14 @@ uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->errorCode);
+          this->__isset.errorCode = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -22107,6 +22142,11 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c
   }
   xfer += oprot->writeFieldEnd();
 
+  if (this->__isset.errorCode) {
+    xfer += oprot->writeFieldBegin("errorCode", ::apache::thrift::protocol::T_I64, 2);
+    xfer += oprot->writeI64(this->errorCode);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -22115,19 +22155,26 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c
 void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) {
   using ::std::swap;
   swap(a.txn_ids, b.txn_ids);
+  swap(a.errorCode, b.errorCode);
+  swap(a.__isset, b.__isset);
 }
 
 AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other826) {
   txn_ids = other826.txn_ids;
+  errorCode = other826.errorCode;
+  __isset = other826.__isset;
 }
 AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other827) {
   txn_ids = other827.txn_ids;
+  errorCode = other827.errorCode;
+  __isset = other827.__isset;
   return *this;
 }
 void AbortTxnsRequest::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
   out << "AbortTxnsRequest(";
   out << "txn_ids=" << to_string(txn_ids);
+  out << ", " << "errorCode="; (__isset.errorCode ? (out << to_string(errorCode)) : (out << "<null>"));
   out << ")";
 }
 
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
index bac4f87320d..e52a356e36c 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -8793,9 +8793,10 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b);
 std::ostream& operator<<(std::ostream& out, const OpenTxnsResponse& obj);
 
 typedef struct _AbortTxnRequest__isset {
-  _AbortTxnRequest__isset() : replPolicy(false), txn_type(false) {}
+  _AbortTxnRequest__isset() : replPolicy(false), txn_type(false), errorCode(false) {}
   bool replPolicy :1;
   bool txn_type :1;
+  bool errorCode :1;
 } _AbortTxnRequest__isset;
 
 class AbortTxnRequest : public virtual ::apache::thrift::TBase {
@@ -8806,7 +8807,8 @@ class AbortTxnRequest : public virtual ::apache::thrift::TBase {
   AbortTxnRequest() noexcept
                   : txnid(0),
                     replPolicy(),
-                    txn_type(static_cast<TxnType::type>(0)) {
+                    txn_type(static_cast<TxnType::type>(0)),
+                    errorCode(0) {
   }
 
   virtual ~AbortTxnRequest() noexcept;
@@ -8817,6 +8819,7 @@ class AbortTxnRequest : public virtual ::apache::thrift::TBase {
    * @see TxnType
    */
   TxnType::type txn_type;
+  int64_t errorCode;
 
   _AbortTxnRequest__isset __isset;
 
@@ -8826,6 +8829,8 @@ class AbortTxnRequest : public virtual ::apache::thrift::TBase {
 
   void __set_txn_type(const TxnType::type val);
 
+  void __set_errorCode(const int64_t val);
+
   bool operator == (const AbortTxnRequest & rhs) const
   {
     if (!(txnid == rhs.txnid))
@@ -8838,6 +8843,10 @@ class AbortTxnRequest : public virtual ::apache::thrift::TBase {
       return false;
     else if (__isset.txn_type && !(txn_type == rhs.txn_type))
       return false;
+    if (__isset.errorCode != rhs.__isset.errorCode)
+      return false;
+    else if (__isset.errorCode && !(errorCode == rhs.errorCode))
+      return false;
     return true;
   }
   bool operator != (const AbortTxnRequest &rhs) const {
@@ -8856,24 +8865,38 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b);
 
 std::ostream& operator<<(std::ostream& out, const AbortTxnRequest& obj);
 
+typedef struct _AbortTxnsRequest__isset {
+  _AbortTxnsRequest__isset() : errorCode(false) {}
+  bool errorCode :1;
+} _AbortTxnsRequest__isset;
 
 class AbortTxnsRequest : public virtual ::apache::thrift::TBase {
  public:
 
   AbortTxnsRequest(const AbortTxnsRequest&);
   AbortTxnsRequest& operator=(const AbortTxnsRequest&);
-  AbortTxnsRequest() noexcept {
+  AbortTxnsRequest() noexcept
+                   : errorCode(0) {
   }
 
   virtual ~AbortTxnsRequest() noexcept;
   std::vector<int64_t>  txn_ids;
+  int64_t errorCode;
+
+  _AbortTxnsRequest__isset __isset;
 
   void __set_txn_ids(const std::vector<int64_t> & val);
 
+  void __set_errorCode(const int64_t val);
+
   bool operator == (const AbortTxnsRequest & rhs) const
   {
     if (!(txn_ids == rhs.txn_ids))
       return false;
+    if (__isset.errorCode != rhs.__isset.errorCode)
+      return false;
+    else if (__isset.errorCode && !(errorCode == rhs.errorCode))
+      return false;
     return true;
   }
   bool operator != (const AbortTxnsRequest &rhs) const {
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
index 37311fc02a6..a45233e0281 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
@@ -14,6 +14,7 @@ package org.apache.hadoop.hive.metastore.api;
   private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)1);
   private static final org.apache.thrift.protocol.TField REPL_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("replPolicy", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField TXN_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_type", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I64, (short)4);
 
   private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new AbortTxnRequestStandardSchemeFactory();
   private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new AbortTxnRequestTupleSchemeFactory();
@@ -21,6 +22,7 @@ package org.apache.hadoop.hive.metastore.api;
   private long txnid; // required
   private @org.apache.thrift.annotation.Nullable java.lang.String replPolicy; // optional
   private @org.apache.thrift.annotation.Nullable TxnType txn_type; // optional
+  private long errorCode; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -30,7 +32,8 @@ package org.apache.hadoop.hive.metastore.api;
      * 
      * @see TxnType
      */
-    TXN_TYPE((short)3, "txn_type");
+    TXN_TYPE((short)3, "txn_type"),
+    ERROR_CODE((short)4, "errorCode");
 
     private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
 
@@ -52,6 +55,8 @@ package org.apache.hadoop.hive.metastore.api;
           return REPL_POLICY;
         case 3: // TXN_TYPE
           return TXN_TYPE;
+        case 4: // ERROR_CODE
+          return ERROR_CODE;
         default:
           return null;
       }
@@ -94,8 +99,9 @@ package org.apache.hadoop.hive.metastore.api;
 
   // isset id assignments
   private static final int __TXNID_ISSET_ID = 0;
+  private static final int __ERRORCODE_ISSET_ID = 1;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.TXN_TYPE};
+  private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.TXN_TYPE,_Fields.ERROR_CODE};
   public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -105,6 +111,8 @@ package org.apache.hadoop.hive.metastore.api;
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.TXN_TYPE, new org.apache.thrift.meta_data.FieldMetaData("txn_type", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TxnType.class)));
+    tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AbortTxnRequest.class, metaDataMap);
   }
@@ -132,6 +140,7 @@ package org.apache.hadoop.hive.metastore.api;
     if (other.isSetTxn_type()) {
       this.txn_type = other.txn_type;
     }
+    this.errorCode = other.errorCode;
   }
 
   public AbortTxnRequest deepCopy() {
@@ -144,6 +153,8 @@ package org.apache.hadoop.hive.metastore.api;
     this.txnid = 0;
     this.replPolicy = null;
     this.txn_type = null;
+    setErrorCodeIsSet(false);
+    this.errorCode = 0;
   }
 
   public long getTxnid() {
@@ -224,6 +235,28 @@ package org.apache.hadoop.hive.metastore.api;
     }
   }
 
+  public long getErrorCode() {
+    return this.errorCode;
+  }
+
+  public void setErrorCode(long errorCode) {
+    this.errorCode = errorCode;
+    setErrorCodeIsSet(true);
+  }
+
+  public void unsetErrorCode() {
+    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID);
+  }
+
+  /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */
+  public boolean isSetErrorCode() {
+    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID);
+  }
+
+  public void setErrorCodeIsSet(boolean value) {
+    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
     switch (field) {
     case TXNID:
@@ -250,6 +283,14 @@ package org.apache.hadoop.hive.metastore.api;
       }
       break;
 
+    case ERROR_CODE:
+      if (value == null) {
+        unsetErrorCode();
+      } else {
+        setErrorCode((java.lang.Long)value);
+      }
+      break;
+
     }
   }
 
@@ -265,6 +306,9 @@ package org.apache.hadoop.hive.metastore.api;
     case TXN_TYPE:
       return getTxn_type();
 
+    case ERROR_CODE:
+      return getErrorCode();
+
     }
     throw new java.lang.IllegalStateException();
   }
@@ -282,6 +326,8 @@ package org.apache.hadoop.hive.metastore.api;
       return isSetReplPolicy();
     case TXN_TYPE:
       return isSetTxn_type();
+    case ERROR_CODE:
+      return isSetErrorCode();
     }
     throw new java.lang.IllegalStateException();
   }
@@ -326,6 +372,15 @@ package org.apache.hadoop.hive.metastore.api;
         return false;
     }
 
+    boolean this_present_errorCode = true && this.isSetErrorCode();
+    boolean that_present_errorCode = true && that.isSetErrorCode();
+    if (this_present_errorCode || that_present_errorCode) {
+      if (!(this_present_errorCode && that_present_errorCode))
+        return false;
+      if (this.errorCode != that.errorCode)
+        return false;
+    }
+
     return true;
   }
 
@@ -343,6 +398,10 @@ package org.apache.hadoop.hive.metastore.api;
     if (isSetTxn_type())
       hashCode = hashCode * 8191 + txn_type.getValue();
 
+    hashCode = hashCode * 8191 + ((isSetErrorCode()) ? 131071 : 524287);
+    if (isSetErrorCode())
+      hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(errorCode);
+
     return hashCode;
   }
 
@@ -384,6 +443,16 @@ package org.apache.hadoop.hive.metastore.api;
         return lastComparison;
       }
     }
+    lastComparison = java.lang.Boolean.compare(isSetErrorCode(), other.isSetErrorCode());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetErrorCode()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, other.errorCode);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -428,6 +497,12 @@ package org.apache.hadoop.hive.metastore.api;
       }
       first = false;
     }
+    if (isSetErrorCode()) {
+      if (!first) sb.append(", ");
+      sb.append("errorCode:");
+      sb.append(this.errorCode);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -501,6 +576,14 @@ package org.apache.hadoop.hive.metastore.api;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 4: // ERROR_CODE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.errorCode = iprot.readI64();
+              struct.setErrorCodeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -531,6 +614,11 @@ package org.apache.hadoop.hive.metastore.api;
           oprot.writeFieldEnd();
         }
       }
+      if (struct.isSetErrorCode()) {
+        oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC);
+        oprot.writeI64(struct.errorCode);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -556,13 +644,19 @@ package org.apache.hadoop.hive.metastore.api;
       if (struct.isSetTxn_type()) {
         optionals.set(1);
       }
-      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetErrorCode()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
       if (struct.isSetReplPolicy()) {
         oprot.writeString(struct.replPolicy);
       }
       if (struct.isSetTxn_type()) {
         oprot.writeI32(struct.txn_type.getValue());
       }
+      if (struct.isSetErrorCode()) {
+        oprot.writeI64(struct.errorCode);
+      }
     }
 
     @Override
@@ -570,7 +664,7 @@ package org.apache.hadoop.hive.metastore.api;
       org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
       struct.txnid = iprot.readI64();
       struct.setTxnidIsSet(true);
-      java.util.BitSet incoming = iprot.readBitSet(2);
+      java.util.BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         struct.replPolicy = iprot.readString();
         struct.setReplPolicyIsSet(true);
@@ -579,6 +673,10 @@ package org.apache.hadoop.hive.metastore.api;
         struct.txn_type = org.apache.hadoop.hive.metastore.api.TxnType.findByValue(iprot.readI32());
         struct.setTxn_typeIsSet(true);
       }
+      if (incoming.get(2)) {
+        struct.errorCode = iprot.readI64();
+        struct.setErrorCodeIsSet(true);
+      }
     }
   }
 
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
index 09e9808a0ed..249ea633cba 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
@@ -12,15 +12,18 @@ package org.apache.hadoop.hive.metastore.api;
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AbortTxnsRequest");
 
   private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_ids", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I64, (short)2);
 
   private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new AbortTxnsRequestStandardSchemeFactory();
   private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new AbortTxnsRequestTupleSchemeFactory();
 
   private @org.apache.thrift.annotation.Nullable java.util.List<java.lang.Long> txn_ids; // required
+  private long errorCode; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    TXN_IDS((short)1, "txn_ids");
+    TXN_IDS((short)1, "txn_ids"),
+    ERROR_CODE((short)2, "errorCode");
 
     private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
 
@@ -38,6 +41,8 @@ package org.apache.hadoop.hive.metastore.api;
       switch(fieldId) {
         case 1: // TXN_IDS
           return TXN_IDS;
+        case 2: // ERROR_CODE
+          return ERROR_CODE;
         default:
           return null;
       }
@@ -79,12 +84,17 @@ package org.apache.hadoop.hive.metastore.api;
   }
 
   // isset id assignments
+  private static final int __ERRORCODE_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.ERROR_CODE};
   public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
     tmpMap.put(_Fields.TXN_IDS, new org.apache.thrift.meta_data.FieldMetaData("txn_ids", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AbortTxnsRequest.class, metaDataMap);
   }
@@ -103,10 +113,12 @@ package org.apache.hadoop.hive.metastore.api;
    * Performs a deep copy on <i>other</i>.
    */
   public AbortTxnsRequest(AbortTxnsRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
     if (other.isSetTxn_ids()) {
       java.util.List<java.lang.Long> __this__txn_ids = new java.util.ArrayList<java.lang.Long>(other.txn_ids);
       this.txn_ids = __this__txn_ids;
     }
+    this.errorCode = other.errorCode;
   }
 
   public AbortTxnsRequest deepCopy() {
@@ -116,6 +128,8 @@ package org.apache.hadoop.hive.metastore.api;
   @Override
   public void clear() {
     this.txn_ids = null;
+    setErrorCodeIsSet(false);
+    this.errorCode = 0;
   }
 
   public int getTxn_idsSize() {
@@ -158,6 +172,28 @@ package org.apache.hadoop.hive.metastore.api;
     }
   }
 
+  public long getErrorCode() {
+    return this.errorCode;
+  }
+
+  public void setErrorCode(long errorCode) {
+    this.errorCode = errorCode;
+    setErrorCodeIsSet(true);
+  }
+
+  public void unsetErrorCode() {
+    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID);
+  }
+
+  /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */
+  public boolean isSetErrorCode() {
+    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID);
+  }
+
+  public void setErrorCodeIsSet(boolean value) {
+    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
     switch (field) {
     case TXN_IDS:
@@ -168,6 +204,14 @@ package org.apache.hadoop.hive.metastore.api;
       }
       break;
 
+    case ERROR_CODE:
+      if (value == null) {
+        unsetErrorCode();
+      } else {
+        setErrorCode((java.lang.Long)value);
+      }
+      break;
+
     }
   }
 
@@ -177,6 +221,9 @@ package org.apache.hadoop.hive.metastore.api;
     case TXN_IDS:
       return getTxn_ids();
 
+    case ERROR_CODE:
+      return getErrorCode();
+
     }
     throw new java.lang.IllegalStateException();
   }
@@ -190,6 +237,8 @@ package org.apache.hadoop.hive.metastore.api;
     switch (field) {
     case TXN_IDS:
       return isSetTxn_ids();
+    case ERROR_CODE:
+      return isSetErrorCode();
     }
     throw new java.lang.IllegalStateException();
   }
@@ -216,6 +265,15 @@ package org.apache.hadoop.hive.metastore.api;
         return false;
     }
 
+    boolean this_present_errorCode = true && this.isSetErrorCode();
+    boolean that_present_errorCode = true && that.isSetErrorCode();
+    if (this_present_errorCode || that_present_errorCode) {
+      if (!(this_present_errorCode && that_present_errorCode))
+        return false;
+      if (this.errorCode != that.errorCode)
+        return false;
+    }
+
     return true;
   }
 
@@ -227,6 +285,10 @@ package org.apache.hadoop.hive.metastore.api;
     if (isSetTxn_ids())
       hashCode = hashCode * 8191 + txn_ids.hashCode();
 
+    hashCode = hashCode * 8191 + ((isSetErrorCode()) ? 131071 : 524287);
+    if (isSetErrorCode())
+      hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(errorCode);
+
     return hashCode;
   }
 
@@ -248,6 +310,16 @@ package org.apache.hadoop.hive.metastore.api;
         return lastComparison;
       }
     }
+    lastComparison = java.lang.Boolean.compare(isSetErrorCode(), other.isSetErrorCode());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetErrorCode()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, other.errorCode);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -276,6 +348,12 @@ package org.apache.hadoop.hive.metastore.api;
       sb.append(this.txn_ids);
     }
     first = false;
+    if (isSetErrorCode()) {
+      if (!first) sb.append(", ");
+      sb.append("errorCode:");
+      sb.append(this.errorCode);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -299,6 +377,8 @@ package org.apache.hadoop.hive.metastore.api;
 
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
     try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
@@ -341,6 +421,14 @@ package org.apache.hadoop.hive.metastore.api;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 2: // ERROR_CODE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.errorCode = iprot.readI64();
+              struct.setErrorCodeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -366,6 +454,11 @@ package org.apache.hadoop.hive.metastore.api;
         }
         oprot.writeFieldEnd();
       }
+      if (struct.isSetErrorCode()) {
+        oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC);
+        oprot.writeI64(struct.errorCode);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -390,6 +483,14 @@ package org.apache.hadoop.hive.metastore.api;
           oprot.writeI64(_iter746);
         }
       }
+      java.util.BitSet optionals = new java.util.BitSet();
+      if (struct.isSetErrorCode()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetErrorCode()) {
+        oprot.writeI64(struct.errorCode);
+      }
     }
 
     @Override
@@ -406,6 +507,11 @@ package org.apache.hadoop.hive.metastore.api;
         }
       }
       struct.setTxn_idsIsSet(true);
+      java.util.BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.errorCode = iprot.readI64();
+        struct.setErrorCodeIsSet(true);
+      }
     }
   }
 
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnRequest.php
index 589d5d4a5da..399aeaf626c 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnRequest.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnRequest.php
@@ -37,6 +37,11 @@ class AbortTxnRequest
             'type' => TType::I32,
             'class' => '\metastore\TxnType',
         ),
+        4 => array(
+            'var' => 'errorCode',
+            'isRequired' => false,
+            'type' => TType::I64,
+        ),
     );
 
     /**
@@ -51,6 +56,10 @@ class AbortTxnRequest
      * @var int
      */
     public $txn_type = null;
+    /**
+     * @var int
+     */
+    public $errorCode = null;
 
     public function __construct($vals = null)
     {
@@ -64,6 +73,9 @@ class AbortTxnRequest
             if (isset($vals['txn_type'])) {
                 $this->txn_type = $vals['txn_type'];
             }
+            if (isset($vals['errorCode'])) {
+                $this->errorCode = $vals['errorCode'];
+            }
         }
     }
 
@@ -107,6 +119,13 @@ class AbortTxnRequest
                         $xfer += $input->skip($ftype);
                     }
                     break;
+                case 4:
+                    if ($ftype == TType::I64) {
+                        $xfer += $input->readI64($this->errorCode);
+                    } else {
+                        $xfer += $input->skip($ftype);
+                    }
+                    break;
                 default:
                     $xfer += $input->skip($ftype);
                     break;
@@ -136,6 +155,11 @@ class AbortTxnRequest
             $xfer += $output->writeI32($this->txn_type);
             $xfer += $output->writeFieldEnd();
         }
+        if ($this->errorCode !== null) {
+            $xfer += $output->writeFieldBegin('errorCode', TType::I64, 4);
+            $xfer += $output->writeI64($this->errorCode);
+            $xfer += $output->writeFieldEnd();
+        }
         $xfer += $output->writeFieldStop();
         $xfer += $output->writeStructEnd();
         return $xfer;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnsRequest.php
index 06947324321..ca1519f36af 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnsRequest.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnsRequest.php
@@ -30,12 +30,21 @@ class AbortTxnsRequest
                 'type' => TType::I64,
                 ),
         ),
+        2 => array(
+            'var' => 'errorCode',
+            'isRequired' => false,
+            'type' => TType::I64,
+        ),
     );
 
     /**
      * @var int[]
      */
     public $txn_ids = null;
+    /**
+     * @var int
+     */
+    public $errorCode = null;
 
     public function __construct($vals = null)
     {
@@ -43,6 +52,9 @@ class AbortTxnsRequest
             if (isset($vals['txn_ids'])) {
                 $this->txn_ids = $vals['txn_ids'];
             }
+            if (isset($vals['errorCode'])) {
+                $this->errorCode = $vals['errorCode'];
+            }
         }
     }
 
@@ -81,6 +93,13 @@ class AbortTxnsRequest
                         $xfer += $input->skip($ftype);
                     }
                     break;
+                case 2:
+                    if ($ftype == TType::I64) {
+                        $xfer += $input->readI64($this->errorCode);
+                    } else {
+                        $xfer += $input->skip($ftype);
+                    }
+                    break;
                 default:
                     $xfer += $input->skip($ftype);
                     break;
@@ -107,6 +126,11 @@ class AbortTxnsRequest
             $output->writeListEnd();
             $xfer += $output->writeFieldEnd();
         }
+        if ($this->errorCode !== null) {
+            $xfer += $output->writeFieldBegin('errorCode', TType::I64, 2);
+            $xfer += $output->writeI64($this->errorCode);
+            $xfer += $output->writeFieldEnd();
+        }
         $xfer += $output->writeFieldStop();
         $xfer += $output->writeStructEnd();
         return $xfer;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 5f0c58a0ed7..abfef9c9d34 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -12520,14 +12520,16 @@ class AbortTxnRequest(object):
      - txnid
      - replPolicy
      - txn_type
+     - errorCode
 
     """
 
 
-    def __init__(self, txnid=None, replPolicy=None, txn_type=None,):
+    def __init__(self, txnid=None, replPolicy=None, txn_type=None, errorCode=None,):
         self.txnid = txnid
         self.replPolicy = replPolicy
         self.txn_type = txn_type
+        self.errorCode = errorCode
 
     def read(self, iprot):
         if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
@@ -12553,6 +12555,11 @@ class AbortTxnRequest(object):
                     self.txn_type = iprot.readI32()
                 else:
                     iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.I64:
+                    self.errorCode = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
             else:
                 iprot.skip(ftype)
             iprot.readFieldEnd()
@@ -12575,6 +12582,10 @@ class AbortTxnRequest(object):
             oprot.writeFieldBegin('txn_type', TType.I32, 3)
             oprot.writeI32(self.txn_type)
             oprot.writeFieldEnd()
+        if self.errorCode is not None:
+            oprot.writeFieldBegin('errorCode', TType.I64, 4)
+            oprot.writeI64(self.errorCode)
+            oprot.writeFieldEnd()
         oprot.writeFieldStop()
         oprot.writeStructEnd()
 
@@ -12599,12 +12610,14 @@ class AbortTxnsRequest(object):
     """
     Attributes:
      - txn_ids
+     - errorCode
 
     """
 
 
-    def __init__(self, txn_ids=None,):
+    def __init__(self, txn_ids=None, errorCode=None,):
         self.txn_ids = txn_ids
+        self.errorCode = errorCode
 
     def read(self, iprot):
         if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
@@ -12625,6 +12638,11 @@ class AbortTxnsRequest(object):
                     iprot.readListEnd()
                 else:
                     iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.I64:
+                    self.errorCode = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
             else:
                 iprot.skip(ftype)
             iprot.readFieldEnd()
@@ -12642,6 +12660,10 @@ class AbortTxnsRequest(object):
                 oprot.writeI64(iter659)
             oprot.writeListEnd()
             oprot.writeFieldEnd()
+        if self.errorCode is not None:
+            oprot.writeFieldBegin('errorCode', TType.I64, 2)
+            oprot.writeI64(self.errorCode)
+            oprot.writeFieldEnd()
         oprot.writeFieldStop()
         oprot.writeStructEnd()
 
@@ -31011,11 +31033,13 @@ AbortTxnRequest.thrift_spec = (
     (1, TType.I64, 'txnid', None, None, ),  # 1
     (2, TType.STRING, 'replPolicy', 'UTF8', None, ),  # 2
     (3, TType.I32, 'txn_type', None, None, ),  # 3
+    (4, TType.I64, 'errorCode', None, None, ),  # 4
 )
 all_structs.append(AbortTxnsRequest)
 AbortTxnsRequest.thrift_spec = (
     None,  # 0
     (1, TType.LIST, 'txn_ids', (TType.I64, None, False), None, ),  # 1
+    (2, TType.I64, 'errorCode', None, None, ),  # 2
 )
 all_structs.append(CommitTxnKeyValue)
 CommitTxnKeyValue.thrift_spec = (
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index c30dd8203f5..c5137c0dd13 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -3746,11 +3746,13 @@ class AbortTxnRequest
   TXNID = 1
   REPLPOLICY = 2
   TXN_TYPE = 3
+  ERRORCODE = 4
 
   FIELDS = {
     TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid'},
     REPLPOLICY => {:type => ::Thrift::Types::STRING, :name => 'replPolicy', :optional => true},
-    TXN_TYPE => {:type => ::Thrift::Types::I32, :name => 'txn_type', :optional => true, :enum_class => ::TxnType}
+    TXN_TYPE => {:type => ::Thrift::Types::I32, :name => 'txn_type', :optional => true, :enum_class => ::TxnType},
+    ERRORCODE => {:type => ::Thrift::Types::I64, :name => 'errorCode', :optional => true}
   }
 
   def struct_fields; FIELDS; end
@@ -3768,9 +3770,11 @@ end
 class AbortTxnsRequest
   include ::Thrift::Struct, ::Thrift::Struct_Union
   TXN_IDS = 1
+  ERRORCODE = 2
 
   FIELDS = {
-    TXN_IDS => {:type => ::Thrift::Types::LIST, :name => 'txn_ids', :element => {:type => ::Thrift::Types::I64}}
+    TXN_IDS => {:type => ::Thrift::Types::LIST, :name => 'txn_ids', :element => {:type => ::Thrift::Types::I64}},
+    ERRORCODE => {:type => ::Thrift::Types::I64, :name => 'errorCode', :optional => true}
   }
 
   def struct_fields; FIELDS; end
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 43ca7619fa1..00879bc3174 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -4004,6 +4004,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     client.abort_txn(new AbortTxnRequest(txnid));
   }
 
+  @Override
+  public void rollbackTxn(AbortTxnRequest abortTxnRequest) throws NoSuchTxnException, TException {
+    client.abort_txn(abortTxnRequest);
+  }
+
   @Override
   public void replRollbackTxn(long srcTxnId, String replPolicy, TxnType txnType) throws NoSuchTxnException, TException {
     AbortTxnRequest rqst = new AbortTxnRequest(srcTxnId);
@@ -4048,6 +4053,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     client.abort_txns(new AbortTxnsRequest(txnids));
   }
 
+  @Override
+  public void abortTxns(AbortTxnsRequest abortTxnsRequest) throws NoSuchTxnException, TException {
+    client.abort_txns(abortTxnsRequest);
+  }
+
   @Override
   public void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List<String> partNames)
           throws TException {
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 5d31161db3b..0ada3af0074 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -3150,6 +3150,18 @@ public interface IMetaStoreClient {
    */
   void rollbackTxn(long txnid) throws NoSuchTxnException, TException;
 
+  /**
+   * Rollback a transaction.  This will also unlock any locks associated with
+   * this transaction.
+   * @param abortTxnRequest AbortTxnRequest object containing the transaction id and
+   * an optional error code.
+   * @throws NoSuchTxnException if the requested transaction does not exist.
+   * Note that this can result from the transaction having timed out and been
+   * deleted.
+   * @throws TException
+   */
+  void rollbackTxn(AbortTxnRequest abortTxnRequest) throws NoSuchTxnException, TException;
+
   /**
    * Rollback a transaction.  This will also unlock any locks associated with
    * this transaction.
@@ -3227,6 +3239,14 @@ public interface IMetaStoreClient {
    */
   void abortTxns(List<Long> txnids) throws TException;
 
+  /**
+   * Abort a list of transactions, optionally recording the abort reason via an
+   * error code as defined in TxnErrorMsg.java.
+   * @param abortTxnsRequest AbortTxnsRequest object containing the txn ids and an optional error code
+   * @throws TException
+   */
+  void abortTxns(AbortTxnsRequest abortTxnsRequest) throws TException;
+
   /**
    * Allocate a per table write ID and associate it with the given transaction.
    * @param txnId id of transaction to which the allocated write ID to be associated.
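
For context, a minimal sketch (not part of the patch) of how a caller might use the two overloads added above to record why a transaction was aborted. The connected IMetaStoreClient instance, the open transaction ids, and the class/method names below are assumptions for illustration; TxnErrorMsg is assumed to be on the classpath (it lives in metastore-server, as in the Msck change below).

    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
    import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
    import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
    import org.apache.thrift.TException;

    public final class AbortWithReasonSketch {
      // Abort a single transaction and attach a reason code to the request.
      static void abortOne(IMetaStoreClient client, long txnId) throws TException {
        AbortTxnRequest rqst = new AbortTxnRequest(txnId);
        rqst.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode());
        client.rollbackTxn(rqst);
      }

      // Abort a batch of transactions; one reason code applies to the whole batch.
      static void abortMany(IMetaStoreClient client, List<Long> txnIds) throws TException {
        AbortTxnsRequest rqst = new AbortTxnsRequest(txnIds);
        rqst.setErrorCode(TxnErrorMsg.ABORT_QUERY.getErrorCode());
        client.abortTxns(rqst);
      }
    }

Requests that leave errorCode unset keep the old behaviour; TxnHandler falls back to TxnErrorMsg.NONE.
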
diff --git a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto
index 56cf6c9109b..8ef58bc2285 100644
--- a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto
+++ b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto
@@ -1966,6 +1966,7 @@ message AbortTxnRequest {
   int64 txnid = 1;
   string repl_policy = 2;
   TxnType txn_type = 3;
+  optional int64 errorCode = 4;
 }
 
 message AbortTxnResponse {
@@ -1973,6 +1974,7 @@ message AbortTxnResponse {
 
 message AbortTxnsRequest {
   repeated int64 txn_ids = 1;
+  optional int64 errorCode = 2;
 }
 
 message AbortTxnsResponse {
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 78bf0b7466a..87595f632fe 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -1076,10 +1076,12 @@ struct AbortTxnRequest {
     1: required i64 txnid,
     2: optional string replPolicy,
     3: optional TxnType txn_type,
+    4: optional i64 errorCode,
 }
 
 struct AbortTxnsRequest {
     1: required list<i64> txn_ids,
+    2: optional i64 errorCode,
 }
 
 struct CommitTxnKeyValue {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java
index ea4f57a4d2b..926875e514c 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java
@@ -36,6 +36,7 @@ import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.LockRequest;
 import org.apache.hadoop.hive.metastore.api.LockResponse;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hive.metastore.api.MetastoreException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.FileUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
@@ -55,7 +57,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
 
 /**
  * Msck repairs table metadata specifically related to partition information to be in-sync with directories in table
@@ -300,9 +301,11 @@ public class Msck {
         ret = false;
       }
     } else {
+      LOG.info("txnId: {} failed. Aborting..", txnId);
+      AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId);
+      abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_MSCK_TXN.getErrorCode());
       try {
-        LOG.info("txnId: {} failed. Aborting..", txnId);
-        getMsc().abortTxns(Lists.newArrayList(txnId));
+        getMsc().rollbackTxn(abortTxnRequest);
       } catch (Exception e) {
         LOG.error("Error while aborting txnId: {} for table: {}", txnId, qualifiedTableName, e);
         ret = false;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java
new file mode 100644
index 00000000000..6d7181db667
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.txn;
+
+/**
+ * Enumerates the error messages that are recorded when transactions are aborted.
+ */
+public enum TxnErrorMsg {
+    // Txn Error Codes: 50000 - 59999.
+    // Query runtime aborts - 50000-50999
+    NONE(50000, "none"),
+    ABORT_QUERY(50001, "abort by query command"),
+    ABORT_CONCURRENT(50002, "concurrent committed transaction"),
+    ABORT_WRITE_CONFLICT(50003, "write conflicts"),
+    ABORT_TIMEOUT(50004, "heartbeat time-out"),
+    ABORT_ROLLBACK(50005, "rollback"),
+    ABORT_COMPACTION_TXN(50006, "compaction transaction abort"),
+    ABORT_MSCK_TXN(50007, "msck transaction abort"),
+    ABORT_MIGRATION_TXN(50008, "managed migration transaction abort"),
+
+    // Replication related aborts - 51000 - 51099
+    ABORT_DEFAULT_REPL_TXN(51000, "Replication: " +
+            "default replication transaction abort"),
+    ABORT_REPLAYED_REPL_TXN(51001, "Replication: " +
+            "replayed replication transaction abort"),
+    ABORT_REPL_WRITEID_TXN(51002, "Replication: " +
+            "abort of allocated txns for referring mapped write ids as aborted ones"),
+    ABORT_FETCH_FAILOVER_METADATA(51003, "Replication: " +
+            "abort of txns while trying to fetch failover metadata"),
+    ABORT_WRITE_TXN_AFTER_TIMEOUT(51004, "Replication: " +
+            "abort of write txns for the db under replication"),
+    ABORT_ONGOING_TXN_FOR_TARGET_DB(51005, "Replication: " +
+            "abort of ongoing txns (opened prior to failover) for the target database");
+
+    private final long errorCode;
+    private final String errorMsg;
+
+    TxnErrorMsg(int errorCode, String errorMsg) {
+      this.errorCode = errorCode;
+      this.errorMsg = errorMsg;
+    }
+
+    public long getErrorCode() {
+        return errorCode;
+    }
+
+    @Override
+    public String toString() {
+      return errorMsg;
+    }
+
+    public static TxnErrorMsg getTxnErrorMsg(long errorCode) {
+      for (TxnErrorMsg txnErrorMsg : values()) {
+        if (txnErrorMsg.getErrorCode() == errorCode) {
+          return txnErrorMsg;
+        }
+      }
+      return TxnErrorMsg.NONE;
+    }
+
+    public String toSqlString() {
+      return "'" + this.toString() + "'";
+    }
+}
+
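
As a quick reference, a small sketch (not part of the patch) of how the enum round-trips between the numeric code carried in the Thrift requests and the human-readable reason stored by the metastore; unknown codes fall back to NONE, so callers never need a null check. The class name is illustrative only.

    import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;

    public final class TxnErrorMsgSketch {
      public static void main(String[] args) {
        long code = TxnErrorMsg.ABORT_TIMEOUT.getErrorCode();     // 50004
        TxnErrorMsg reason = TxnErrorMsg.getTxnErrorMsg(code);    // ABORT_TIMEOUT
        System.out.println(reason);                               // heartbeat time-out
        System.out.println(reason.toSqlString());                 // 'heartbeat time-out'
        System.out.println(TxnErrorMsg.getTxnErrorMsg(12345L));   // none (unknown code)
      }
    }
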
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 77863e068ee..b42b25db95f 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -1012,6 +1012,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   @RetrySemantics.Idempotent
   public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException {
     long txnid = rqst.getTxnid();
+    TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE;
     long sourceTxnId = -1;
     boolean isReplayedReplTxn = TxnType.REPL_CREATED.equals(rqst.getTxn_type());
     boolean isHiveReplTxn = rqst.isSetReplPolicy() && TxnType.DEFAULT.equals(rqst.getTxn_type());
@@ -1053,7 +1054,16 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
           }
           raiseTxnUnexpectedState(status, txnid);
         }
-        abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn);
+
+        if (isReplayedReplTxn) {
+          txnErrorMsg = TxnErrorMsg.ABORT_REPLAYED_REPL_TXN;
+        } else if (isHiveReplTxn) {
+          txnErrorMsg = TxnErrorMsg.ABORT_DEFAULT_REPL_TXN;
+        } else if (rqst.isSetErrorCode()) {
+          txnErrorMsg = TxnErrorMsg.getTxnErrorMsg(rqst.getErrorCode());
+        }
+
+        abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn, txnErrorMsg);
 
         if (isReplayedReplTxn) {
           deleteReplTxnMapEntry(dbConn, sourceTxnId, rqst.getReplPolicy());
@@ -1087,6 +1097,10 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   @RetrySemantics.Idempotent
   public void abortTxns(AbortTxnsRequest rqst) throws MetaException {
     List<Long> txnIds = rqst.getTxn_ids();
+    TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE;
+    if (rqst.isSetErrorCode()) {
+      txnErrorMsg = TxnErrorMsg.getTxnErrorMsg(rqst.getErrorCode());
+    }
     try {
       Connection dbConn = null;
       Statement stmt = null;
@@ -1113,7 +1127,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
             }
           }
         }
-        int numAborted = abortTxns(dbConn, txnIds, false, false);
+        int numAborted = abortTxns(dbConn, txnIds, false, false, txnErrorMsg);
         if (numAborted != txnIds.size()) {
           LOG.warn(
               "Abort Transactions command only aborted {} out of {} transactions. It's possible that the other"
@@ -1514,7 +1528,8 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
                   dbConn.rollback(undoWriteSetForCurrentTxn);
                   LOG.info(msg);
                   //todo: should make abortTxns() write something into TXNS.TXN_META_INFO about this
-                  if (abortTxns(dbConn, Collections.singletonList(txnid), false, isReplayedReplTxn) != 1) {
+                  if (abortTxns(dbConn, Collections.singletonList(txnid), false, isReplayedReplTxn,
+                          TxnErrorMsg.ABORT_WRITE_CONFLICT) != 1) {
                     throw new IllegalStateException(msg + " FAILED!");
                   }
                   dbConn.commit();
@@ -1878,7 +1893,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
           }
 
           // Abort all the allocated txns so that the mapped write ids are referred as aborted ones.
-          int numAborts = abortTxns(dbConn, txnIds, false, false);
+          int numAborts = abortTxns(dbConn, txnIds, false, false, TxnErrorMsg.ABORT_REPL_WRITEID_TXN);
           assert(numAborts == numAbortedWrites);
         }
 
@@ -5076,8 +5091,8 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   private static Map<LockType, Map<LockType, Map<LockState, LockAction>>> jumpTable;
 
   private int abortTxns(Connection dbConn, List<Long> txnids,
-                        boolean skipCount, boolean isReplReplayed) throws SQLException, MetaException {
-    return abortTxns(dbConn, txnids, false, skipCount, isReplReplayed);
+                        boolean skipCount, boolean isReplReplayed, TxnErrorMsg txnErrorMsg) throws SQLException, MetaException {
+    return abortTxns(dbConn, txnids, false, skipCount, isReplReplayed, txnErrorMsg);
   }
   /**
    * TODO: expose this as an operation to client.  Useful for streaming API to abort all remaining
@@ -5094,12 +5109,14 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
    * @throws SQLException
    */
   private int abortTxns(Connection dbConn, List<Long> txnids, boolean checkHeartbeat,
-                        boolean skipCount, boolean isReplReplayed)
+                        boolean skipCount, boolean isReplReplayed, TxnErrorMsg txnErrorMsg)
       throws SQLException, MetaException {
     Statement stmt = null;
     if (txnids.isEmpty()) {
       return 0;
     }
+    Collections.sort(txnids);
+    LOG.debug("Aborting {} transaction(s) {} due to {}", txnids.size(), txnids, txnErrorMsg);
     removeTxnsFromMinHistoryLevel(dbConn, txnids);
     try {
       stmt = dbConn.createStatement();
@@ -5111,6 +5128,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
 
       // add update txns queries to query list
       prefix.append("UPDATE \"TXNS\" SET \"TXN_STATE\" = ").append(TxnStatus.ABORTED)
+              .append(" , \"TXN_META_INFO\" = ").append(txnErrorMsg.toSqlString())
               .append(" WHERE \"TXN_STATE\" = ").append(TxnStatus.OPEN).append(" AND ");
       if (checkHeartbeat) {
         suffix.append(" AND \"TXN_LAST_HEARTBEAT\" < ")
@@ -5150,6 +5168,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
       if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) {
         Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).inc(txnids.size());
       }
+      LOG.warn("Aborted {} transaction(s) {} due to {}", txnids.size(), txnids, txnErrorMsg);
       return numAborted;
     } finally {
       closeStmt(stmt);
@@ -5253,7 +5272,8 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
             " since a concurrent committed transaction [" + JavaUtils.txnIdToString(rs.getLong(4)) + "," + rs.getLong(5) +
             "] has already updated resource '" + resourceName + "'";
           LOG.info(msg);
-          if (abortTxns(dbConn, Collections.singletonList(writeSet.get(0).txnId), false, false) != 1) {
+          if (abortTxns(dbConn, Collections.singletonList(writeSet.get(0).txnId), false, false,
+                  TxnErrorMsg.ABORT_CONCURRENT) != 1) {
             throw new IllegalStateException(msg + " FAILED!");
           }
           dbConn.commit();
@@ -5771,12 +5791,10 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
         close(rs, stmt, null);
         int numTxnsAborted = 0;
         for(List<Long> batchToAbort : timedOutTxns) {
-          if (abortTxns(dbConn, batchToAbort, true, false, false) == batchToAbort.size()) {
+          if (abortTxns(dbConn, batchToAbort, true, false, false, TxnErrorMsg.ABORT_TIMEOUT) == batchToAbort.size()) {
             dbConn.commit();
             numTxnsAborted += batchToAbort.size();
            //todo: add TXNS.COMMENT field and set it to 'aborted by system due to timeout'
-            Collections.sort(batchToAbort);//easier to read logs
-            LOG.info("Aborted the following transactions due to timeout: {}", batchToAbort.toString());
           }
           else {
             //could not abort all txns in this batch - this may happen because in parallel with this
@@ -5787,7 +5805,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
             dbConn.rollback();
           }
         }
-        LOG.info("Aborted {} transactions due to timeout", numTxnsAborted);
+        LOG.info("Aborted {} transaction(s) due to timeout", numTxnsAborted);
         if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) {
           Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_TIMED_OUT_TXNS).inc(numTxnsAborted);
         }
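
For reference, a sketch (not part of the patch) of roughly what the abort UPDATE looks like once the reason is folded in. The "TXN_ID" column name, the placeholder ids, and the exact rendering of the TxnStatus constants are assumptions here; the patch itself only shows the prefix and suffix being assembled, and TxnUtils batches the statement over the txn id list.

    package org.apache.hadoop.hive.metastore.txn;  // so TxnStatus and TxnErrorMsg resolve

    public final class AbortUpdateSketch {
      // Roughly the statement abortTxns() now builds; the real code batches the ids
      // and, for the timeout path, appends a TXN_LAST_HEARTBEAT check as well.
      static String sample() {
        return "UPDATE \"TXNS\" SET \"TXN_STATE\" = " + TxnStatus.ABORTED            // quoted char, e.g. 'a'
             + " , \"TXN_META_INFO\" = " + TxnErrorMsg.ABORT_TIMEOUT.toSqlString()   // 'heartbeat time-out'
             + " WHERE \"TXN_STATE\" = " + TxnStatus.OPEN                            // e.g. 'o'
             + " AND \"TXN_ID\" IN (101, 102, 103)";
      }
    }
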
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index dc73c81c16e..9bb386c4e84 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -2473,6 +2473,11 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
     client.abort_txn(new AbortTxnRequest(txnid));
   }
 
+  @Override
+  public void rollbackTxn(AbortTxnRequest abortTxnRequest) throws NoSuchTxnException, TException {
+    client.abort_txn(abortTxnRequest);
+  }
+
   @Override
   public void replRollbackTxn(long srcTxnId, String replPolicy, TxnType txnType) throws NoSuchTxnException, TException {
     AbortTxnRequest rqst = new AbortTxnRequest(srcTxnId);
@@ -2516,6 +2521,11 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
     client.abort_txns(new AbortTxnsRequest(txnids));
   }
 
+  @Override
+  public void abortTxns(AbortTxnsRequest abortTxnsRequest) throws NoSuchTxnException, TException {
+    client.abort_txns(abortTxnsRequest);
+  }
+
   @Override
   public void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List<String> partNames)
           throws TException {
diff --git a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
index 9215e1b80cf..b3c83b95b06 100644
--- a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
+++ b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.metastore.LockComponentBuilder;
 import org.apache.hadoop.hive.metastore.LockRequestBuilder;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hive.metastore.api.LockState;
 import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.thrift.TException;
@@ -366,7 +368,9 @@ public class TransactionBatch extends AbstractStreamingTransaction {
                 ? 1 : 0), 0);
         for (currentTxnIndex = minOpenTxnIndex;
              currentTxnIndex < txnToWriteIds.size(); currentTxnIndex++) {
-          conn.getMSC().rollbackTxn(txnToWriteIds.get(currentTxnIndex).getTxnId());
+          AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnToWriteIds.get(currentTxnIndex).getTxnId());
+          abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode());
+          conn.getMSC().rollbackTxn(abortTxnRequest);
           txnStatus[currentTxnIndex] = HiveStreamingConnection.TxnState.ABORTED;
         }
         currentTxnIndex--; //since the loop left it == txnToWriteIds.size()
@@ -381,7 +385,9 @@ public class TransactionBatch extends AbstractStreamingTransaction {
         }
         long currTxnId = getCurrentTxnId();
         if (currTxnId > 0) {
-          conn.getMSC().rollbackTxn(currTxnId);
+          AbortTxnRequest abortTxnRequest = new AbortTxnRequest(currTxnId);
+          abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode());
+          conn.getMSC().rollbackTxn(abortTxnRequest);
           txnStatus[currentTxnIndex] = HiveStreamingConnection.TxnState.ABORTED;
         }
       }