Posted to commits@hive.apache.org by pv...@apache.org on 2020/02/03 09:36:23 UTC

[hive] branch master updated: HIVE-22729: Provide a failure reason for failed compactions (Laszlo Pinter reviewed by Karen Coppage, Denys Kuzmenko and Peter Vary)

This is an automated email from the ASF dual-hosted git repository.

pvary pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 5acbffe  HIVE-22729: Provide a failure reason for failed compactions (Laszlo Pinter reviewed by Karen Coppage, Denys Kuzmenko and Peter Vary)
5acbffe is described below

commit 5acbffee33da8cc2236b759ec4f55cb063fc0754
Author: Laszlo Pinter <lp...@cloudera.com>
AuthorDate: Mon Feb 3 10:35:20 2020 +0100

    HIVE-22729: Provide a failure reason for failed compactions (Laszlo Pinter reviewed by Karen Coppage, Denys Kuzmenko and Peter Vary)
---
 .../upgrade/hive/hive-schema-4.0.0.hive.sql        |  12 ++-
 .../upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql   |  12 ++-
 .../show/compactions/ShowCompactionsDesc.java      |   4 +-
 .../show/compactions/ShowCompactionsOperation.java |   5 +
 .../hadoop/hive/ql/txn/compactor/Cleaner.java      |   1 +
 .../hadoop/hive/ql/txn/compactor/Initiator.java    |  13 ++-
 .../hadoop/hive/ql/txn/compactor/Worker.java       |  23 +++--
 .../metastore/txn/TestCompactionTxnHandler.java    |  57 +++++++++--
 .../clientpositive/dbtxnmgr_showlocks.q.out        |   4 +-
 .../test/results/clientpositive/llap/sysdb.q.out   |   2 +
 .../hive/metastore/api/CompactionInfoStruct.java   | 114 ++++++++++++++++++++-
 .../metastore/api/ShowCompactResponseElement.java  | 114 ++++++++++++++++++++-
 .../src/gen/thrift/gen-php/metastore/Types.php     |  46 +++++++++
 .../src/gen/thrift/gen-py/hive_metastore/ttypes.py |  30 +++++-
 .../src/gen/thrift/gen-rb/hive_metastore_types.rb  |   8 +-
 .../src/main/thrift/hive_metastore.thrift          |   2 +
 .../hadoop/hive/metastore/txn/CompactionInfo.java  |  10 +-
 .../hive/metastore/txn/CompactionTxnHandler.java   |  24 ++++-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java       |   6 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java      |   7 +-
 .../src/main/sql/derby/hive-schema-4.0.0.derby.sql |   6 +-
 .../sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql     |   5 +
 .../src/main/sql/mssql/hive-schema-4.0.0.mssql.sql |   2 +
 .../sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql     |   4 +
 .../src/main/sql/mysql/hive-schema-4.0.0.mysql.sql |   6 +-
 .../sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql     |   5 +-
 .../main/sql/oracle/hive-schema-4.0.0.oracle.sql   |   6 +-
 .../sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql   |   5 +-
 .../sql/postgres/hive-schema-4.0.0.postgres.sql    |   6 +-
 .../postgres/upgrade-3.2.0-to-4.0.0.postgres.sql   |   3 +
 30 files changed, 483 insertions(+), 59 deletions(-)
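
In short, this patch threads one new piece of state, the compaction failure reason, through every layer: the metastore RDBMS gains a CQ_ERROR_MESSAGE column on COMPACTION_QUEUE and a CC_ERROR_MESSAGE column on COMPLETED_COMPACTIONS, the Thrift structs CompactionInfoStruct and ShowCompactResponseElement gain an optional errorMessage field, and SHOW COMPACTIONS grows a trailing "Error message" column. A minimal sketch of reading that column over Hive JDBC, assuming a reachable HiveServer2 (the URL and credentials below are placeholders, not part of this patch):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ShowCompactionErrors {
      public static void main(String[] args) throws Exception {
        // Placeholder HiveServer2 endpoint and credentials -- adjust for your cluster.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:hive2://localhost:10000/default", "hive", "");
            Statement stmt = conn.createStatement();
            ResultSet rs = stmt.executeQuery("SHOW COMPACTIONS")) {
          while (rs.next()) {
            // Column labels follow the SCHEMA string in ShowCompactionsDesc;
            // "errormessage" is the twelfth (and new last) column.
            System.out.printf("%s.%s [%s]: %s%n",
                rs.getString("dbname"), rs.getString("tabname"),
                rs.getString("state"), rs.getString("errormessage"));
          }
        }
      }
    }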

diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
index 5421d4d..e3f5eb9 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
@@ -1087,7 +1087,8 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` (
   `CQ_START` bigint,
   `CQ_RUN_AS` string,
   `CQ_HIGHEST_WRITE_ID` bigint,
-  `CQ_HADOOP_JOB_ID` string
+  `CQ_HADOOP_JOB_ID` string,
+  `CQ_ERROR_MESSAGE` string
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
@@ -1105,7 +1106,8 @@ TBLPROPERTIES (
   \"COMPACTION_QUEUE\".\"CQ_START\",
   \"COMPACTION_QUEUE\".\"CQ_RUN_AS\",
   \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\",
-  \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\"
+  \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_ERROR_MESSAGE\"
 FROM \"COMPACTION_QUEUE\"
 "
 );
@@ -1123,7 +1125,8 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` (
   `CC_END` bigint,
   `CC_RUN_AS` string,
   `CC_HIGHEST_WRITE_ID` bigint,
-  `CC_HADOOP_JOB_ID` string
+  `CC_HADOOP_JOB_ID` string,
+  `CC_ERROR_MESSAGE` string
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
@@ -1142,7 +1145,8 @@ TBLPROPERTIES (
   \"COMPLETED_COMPACTIONS\".\"CC_END\",
   \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\",
   \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\",
-  \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\"
+  \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\",
+  \"COMPLETED_COMPACTIONS\".\"CC_ERROR_MESSAGE\"
 FROM \"COMPLETED_COMPACTIONS\"
 "
 );
diff --git a/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
index 0411906..fa51874 100644
--- a/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
@@ -205,7 +205,8 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` (
   `CQ_START` bigint,
   `CQ_RUN_AS` string,
   `CQ_HIGHEST_WRITE_ID` bigint,
-  `CQ_HADOOP_JOB_ID` string
+  `CQ_HADOOP_JOB_ID` string,
+  `CQ_ERROR_MESSAGE` string
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
@@ -223,7 +224,8 @@ TBLPROPERTIES (
   \"COMPACTION_QUEUE\".\"CQ_START\",
   \"COMPACTION_QUEUE\".\"CQ_RUN_AS\",
   \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\",
-  \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\"
+  \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_ERROR_MESSAGE\"
 FROM \"COMPACTION_QUEUE\"
 "
 );
@@ -241,7 +243,8 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` (
   `CC_END` bigint,
   `CC_RUN_AS` string,
   `CC_HIGHEST_WRITE_ID` bigint,
-  `CC_HADOOP_JOB_ID` string
+  `CC_HADOOP_JOB_ID` string,
+  `CC_ERROR_MESSAGE` string
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
@@ -260,7 +263,8 @@ TBLPROPERTIES (
   \"COMPLETED_COMPACTIONS\".\"CC_END\",
   \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\",
   \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\",
-  \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\"
+  \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\",
+  \"COMPLETED_COMPACTIONS\".\"CC_ERROR_MESSAGE\"
 FROM \"COMPLETED_COMPACTIONS\"
 "
 );
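
Both the base schema and the 3.1.0-to-4.0.0 upgrade script have to list the new column twice: once in the Hive-side column list of the external table and once in the hive.sql.query pushed down through the JdbcStorageHandler, otherwise the SYS mapping and the backing RDBMS drift apart. Once the upgrade has run, the column is queryable like any other SYS table; a hedged snippet, assuming 'stmt' is a java.sql.Statement on an open HiveServer2 connection like the one sketched above:

    // Assumes 'stmt' is a java.sql.Statement on a HiveServer2 JDBC connection.
    try (ResultSet rs = stmt.executeQuery(
            "SELECT cq_id, cq_state, cq_error_message FROM sys.compaction_queue")) {
      while (rs.next()) {
        System.out.println(rs.getLong(1) + "\t" + rs.getString(2) + "\t" + rs.getString(3));
      }
    }
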
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
index 9348efc..e470914 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
@@ -32,8 +32,8 @@ public class ShowCompactionsDesc implements DDLDesc, Serializable {
   private static final long serialVersionUID = 1L;
 
   public static final String SCHEMA =
-      "compactionid,dbname,tabname,partname,type,state,hostname,workerid,starttime,duration,hadoopjobid#" +
-      "string:string:string:string:string:string:string:string:string:string:string";
+      "compactionid,dbname,tabname,partname,type,state,hostname,workerid,starttime,duration,hadoopjobid,errormessage#" +
+      "string:string:string:string:string:string:string:string:string:string:string:string";
 
   private String resFile;
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
index 517d882..d45597b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
@@ -86,6 +86,8 @@ public class ShowCompactionsOperation extends DDLOperation<ShowCompactionsDesc>
     os.writeBytes("Duration(ms)");
     os.write(Utilities.tabCode);
     os.writeBytes("HadoopJobId");
+    os.write(Utilities.tabCode);
+    os.writeBytes("Error message");
     os.write(Utilities.newLineCode);
   }
 
@@ -115,6 +117,9 @@ public class ShowCompactionsOperation extends DDLOperation<ShowCompactionsDesc>
     os.writeBytes(e.isSetEndTime() ? Long.toString(e.getEndTime() - e.getStart()) : NO_VAL);
     os.write(Utilities.tabCode);
     os.writeBytes(e.isSetHadoopJobId() ?  e.getHadoopJobId() : NO_VAL);
+    os.write(Utilities.tabCode);
+    String error = e.getErrorMessage();
+    os.writeBytes(error == null ? NO_VAL : error);
     os.write(Utilities.newLineCode);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 6f64290..9ba2b24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -217,6 +217,7 @@ public class Cleaner extends MetaStoreCompactorThread {
     } catch (Exception e) {
       LOG.error("Caught exception when cleaning, unable to complete cleaning of " + ci + " " +
           StringUtils.stringifyException(e));
+      ci.errorMessage = e.getMessage();
       txnHandler.markFailed(ci);
     }
   }
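
The same failure-propagation idiom now appears in Cleaner, Initiator and Worker: stash the exception message on the CompactionInfo before handing it to markFailed(), so the metastore persists the reason next to the failed queue entry. A condensed sketch of the idiom, with a hypothetical doCompactionStep() standing in for the real work of each thread:

    // Hedged sketch of the pattern this patch introduces; doCompactionStep()
    // is a placeholder, not a Hive API.
    try {
      doCompactionStep(ci);
    } catch (Exception e) {
      LOG.error("Compaction step failed for " + ci, e);
      ci.errorMessage = e.getMessage();  // new public field on CompactionInfo
      txnHandler.markFailed(ci);         // persists the failure plus its message
    }
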
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index dedc990..37a5862 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -140,6 +140,7 @@ public class Initiator extends MetaStoreCompactorThread {
             } catch (Throwable t) {
               LOG.error("Caught exception while trying to determine if we should compact {}. " +
                   "Marking failed to avoid repeated failures, {}", ci, t);
+              ci.errorMessage = t.getMessage();
               txnHandler.markFailed(ci);
             }
           }
@@ -459,12 +460,20 @@ public class Initiator extends MetaStoreCompactorThread {
       if (txnHandler.checkFailedCompactions(ci)) {
         LOG.warn("Will not initiate compaction for " + ci.getFullPartitionName() + " since last " +
             MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD + " attempts to compact it failed.");
+        ci.errorMessage = "Compaction is not initiated since last " +
+            MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD + " consecutive compaction attempts failed";
         txnHandler.markFailed(ci);
         return false;
       }
     } catch (Throwable e) {
-      LOG.error("Caught exception while checking compaction eligibility " +
-          StringUtils.stringifyException(e));
+      LOG.error("Caught exception while checking compaction eligibility.", e);
+      try {
+        ci.errorMessage = e.getMessage();
+        txnHandler.markFailed(ci);
+      } catch (MetaException ex) {
+        LOG.error("Caught exception while marking compaction as failed.", e);
+        return false;
+      }
     }
     return true;
   }
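
Two details worth noting in the Initiator change: the threshold is MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD, and once that many consecutive attempts on a table or partition have failed, the refusal to initiate another compaction is now itself recorded as the error message instead of living only in the log; and a failure while checking eligibility now marks the compaction failed as well. A hedged sketch of adjusting the threshold, using the same HiveConf accessor the test changes below rely on:

    // The threshold is an ordinary HiveConf integer setting; 5 is an invented value.
    HiveConf conf = new HiveConf();
    conf.setIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD, 5);
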
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index 5aff71e..383969a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -94,12 +94,12 @@ public class Worker extends RemoteCompactorThread implements MetaStoreThread {
       // Make sure nothing escapes this run method and kills the metastore at large,
       // so wrap it in a big catch Throwable statement.
       CompactionHeartbeater heartbeater = null;
+      CompactionInfo ci = null;
       try {
         if (msc == null) {
           msc = HiveMetaStoreUtils.getHiveMetastoreClient(conf);
         }
-        final CompactionInfo ci = CompactionInfo.optionalCompactionInfoStructToInfo(
-            msc.findNextCompact(workerName));
+        ci = CompactionInfo.optionalCompactionInfoStructToInfo(msc.findNextCompact(workerName));
         LOG.debug("Processing compaction request " + ci);
 
         if (ci == null && !stop.get()) {
@@ -211,10 +211,11 @@ public class Worker extends RemoteCompactorThread implements MetaStoreThread {
             UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
               UserGroupInformation.getLoginUser());
             final Partition fp = p;
+            final CompactionInfo fci = ci;
             ugi.doAs(new PrivilegedExceptionAction<Object>() {
               @Override
               public Object run() throws Exception {
-                mr.run(conf, jobName.toString(), t, fp, sd, tblValidWriteIds, ci, su, msc, dir);
+                mr.run(conf, jobName.toString(), t, fp, sd, tblValidWriteIds, fci, su, msc, dir);
                 return null;
               }
             });
@@ -234,16 +235,26 @@ public class Worker extends RemoteCompactorThread implements MetaStoreThread {
         } catch (Throwable e) {
           LOG.error("Caught exception while trying to compact " + ci +
               ".  Marking failed to avoid repeated failures, " + StringUtils.stringifyException(e));
+          ci.errorMessage = e.getMessage();
           msc.markFailed(CompactionInfo.compactionInfoToStruct(ci));
           msc.abortTxns(Collections.singletonList(compactorTxnId));
         }
       } catch (TException | IOException t) {
         LOG.error("Caught an exception in the main loop of compactor worker " + workerName + ", " +
             StringUtils.stringifyException(t));
-        if (msc != null) {
-          msc.close();
+        try {
+          if (msc != null && ci != null) {
+            ci.errorMessage = t.getMessage();
+            msc.markFailed(CompactionInfo.compactionInfoToStruct(ci));
+          }
+        } catch (TException e) {
+          LOG.error("Caught an exception while trying to mark compaction {} as failed: {}", ci, e);
+        } finally {
+          if (msc != null) {
+            msc.close();
+            msc = null;
+          }
         }
-        msc = null;
         try {
           Thread.sleep(SLEEP_TIME);
         } catch (InterruptedException e) {
diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
index e589554..15fcfc0 100644
--- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
+++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
@@ -232,17 +232,43 @@ public class TestCompactionTxnHandler {
   }
 
   @Test
+  public void testShowCompactions() throws Exception {
+    final String dbName = "foo";
+    final String tableName = "bar";
+    final String partitionName = "ds=today";
+    CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MINOR);
+    rqst.setPartitionname(partitionName);
+    txnHandler.compact(rqst);
+    ShowCompactResponse showCompactResponse = txnHandler.showCompact(new ShowCompactRequest());
+    showCompactResponse.getCompacts().forEach(e -> {
+      assertEquals(dbName, e.getDbname());
+      assertEquals(tableName, e.getTablename());
+      assertEquals(partitionName, e.getPartitionname());
+      assertEquals("initiated", e.getState());
+      assertEquals(CompactionType.MINOR, e.getType());
+      assertEquals(1, e.getId());
+    });
+  }
+
+  @Test
   public void testMarkFailed() throws Exception {
-    CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR);
-    rqst.setPartitionname("ds=today");
+    final String dbName = "foo";
+    final String tableName = "bar";
+    final String partitionName = "ds=today";
+    final String workerId = "fred";
+    final String status = "failed";
+    final String errorMessage = "Dummy error";
+    CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MINOR);
+    rqst.setPartitionname(partitionName);
     txnHandler.compact(rqst);
     assertEquals(0, txnHandler.findReadyToClean().size());
-    CompactionInfo ci = txnHandler.findNextToCompact("fred");
+    CompactionInfo ci = txnHandler.findNextToCompact(workerId);
     assertNotNull(ci);
 
     assertEquals(0, txnHandler.findReadyToClean().size());
+    ci.errorMessage = errorMessage;
     txnHandler.markFailed(ci);
-    assertNull(txnHandler.findNextToCompact("fred"));
+    assertNull(txnHandler.findNextToCompact(workerId));
     boolean failedCheck = txnHandler.checkFailedCompactions(ci);
     assertFalse(failedCheck);
     try {
@@ -262,23 +288,37 @@ public class TestCompactionTxnHandler {
 
     // Add more failed compactions so that the total is exactly COMPACTOR_INITIATOR_FAILED_THRESHOLD
     for (int i = 1 ; i <  conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD); i++) {
-      addFailedCompaction("foo", "bar", CompactionType.MINOR, "ds=today");
+      addFailedCompaction(dbName, tableName, CompactionType.MINOR, partitionName, errorMessage);
     }
     // Now checkFailedCompactions() will return true
     assertTrue(txnHandler.checkFailedCompactions(ci));
-
+    // Check the output of show compactions
+    checkShowCompaction(dbName, tableName, partitionName, status, errorMessage);
     // Now add enough failed compactions to ensure purgeCompactionHistory() will attempt delete;
     // HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED is enough for this.
     // But we also want enough to tickle the code in TxnUtils.buildQueryWithINClauseStrings()
     // so that it produces multiple queries. For that we need at least 290.
     for (int i = 0 ; i < 300; i++) {
-      addFailedCompaction("foo", "bar", CompactionType.MINOR, "ds=today");
+      addFailedCompaction(dbName, tableName, CompactionType.MINOR, partitionName, errorMessage);
     }
+    checkShowCompaction(dbName, tableName, partitionName, status, errorMessage);
     txnHandler.purgeCompactionHistory();
   }
 
+  private void checkShowCompaction(String dbName, String tableName, String partition,
+      String status, String errorMessage) throws MetaException {
+    ShowCompactResponse showCompactResponse = txnHandler.showCompact(new ShowCompactRequest());
+    showCompactResponse.getCompacts().forEach(e -> {
+      assertEquals(dbName, e.getDbname());
+      assertEquals(tableName, e.getTablename());
+      assertEquals(partition, e.getPartitionname());
+      assertEquals(status, e.getState());
+      assertEquals(errorMessage, e.getErrorMessage());
+    });
+  }
+
   private void addFailedCompaction(String dbName, String tableName, CompactionType type,
-      String partitionName) throws MetaException {
+      String partitionName, String errorMessage) throws MetaException {
     CompactionRequest rqst;
     CompactionInfo ci;
     rqst = new CompactionRequest(dbName, tableName, type);
@@ -286,6 +326,7 @@ public class TestCompactionTxnHandler {
     txnHandler.compact(rqst);
     ci = txnHandler.findNextToCompact("fred");
     assertNotNull(ci);
+    ci.errorMessage = errorMessage;
     txnHandler.markFailed(ci);
   }
 
diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
index 03c6724..fd7ad23 100644
--- a/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
+++ b/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
@@ -145,8 +145,8 @@ PREHOOK: query: show compactions
 PREHOOK: type: SHOW COMPACTIONS
 POSTHOOK: query: show compactions
 POSTHOOK: type: SHOW COMPACTIONS
-CompactionId	Database	Table	Partition	Type	State	Hostname	Worker	Start Time	Duration(ms)	HadoopJobId
-1	default	partitioned_acid_table	p=abc	MINOR	initiated	 --- 	 --- 	 --- 	 --- 	 --- 
+CompactionId	Database	Table	Partition	Type	State	Hostname	Worker	Start Time	Duration(ms)	HadoopJobId	Error message
+1	default	partitioned_acid_table	p=abc	MINOR	initiated	 --- 	 --- 	 --- 	 --- 	 --- 	 --- 
 PREHOOK: query: drop table partitioned_acid_table
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@partitioned_acid_table
diff --git a/ql/src/test/results/clientpositive/llap/sysdb.q.out b/ql/src/test/results/clientpositive/llap/sysdb.q.out
index 6ec2a06..38cadf3 100644
--- a/ql/src/test/results/clientpositive/llap/sysdb.q.out
+++ b/ql/src/test/results/clientpositive/llap/sysdb.q.out
@@ -546,6 +546,7 @@ columns_v2	comment
 columns_v2	integer_idx
 columns_v2	type_name
 compaction_queue	cq_database
+compaction_queue	cq_error_message
 #### A masked pattern was here ####
 compaction_queue	cq_highest_write_id
 compaction_queue	cq_id
@@ -586,6 +587,7 @@ compactions	c_worker_id
 compactions	c_worker_id
 completed_compactions	cc_database
 completed_compactions	cc_end
+completed_compactions	cc_error_message
 #### A masked pattern was here ####
 completed_compactions	cc_highest_write_id
 completed_compactions	cc_id
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java
index 4aee45c..31b6ed4 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java
@@ -50,6 +50,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField WORKER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("workerId", org.apache.thrift.protocol.TType.STRING, (short)10);
   private static final org.apache.thrift.protocol.TField START_FIELD_DESC = new org.apache.thrift.protocol.TField("start", org.apache.thrift.protocol.TType.I64, (short)11);
   private static final org.apache.thrift.protocol.TField HIGHEST_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("highestWriteId", org.apache.thrift.protocol.TType.I64, (short)12);
+  private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)13);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -69,6 +70,7 @@ import org.slf4j.LoggerFactory;
   private String workerId; // optional
   private long start; // optional
   private long highestWriteId; // optional
+  private String errorMessage; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -87,7 +89,8 @@ import org.slf4j.LoggerFactory;
     STATE((short)9, "state"),
     WORKER_ID((short)10, "workerId"),
     START((short)11, "start"),
-    HIGHEST_WRITE_ID((short)12, "highestWriteId");
+    HIGHEST_WRITE_ID((short)12, "highestWriteId"),
+    ERROR_MESSAGE((short)13, "errorMessage");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -126,6 +129,8 @@ import org.slf4j.LoggerFactory;
           return START;
         case 12: // HIGHEST_WRITE_ID
           return HIGHEST_WRITE_ID;
+        case 13: // ERROR_MESSAGE
+          return ERROR_MESSAGE;
         default:
           return null;
       }
@@ -171,7 +176,7 @@ import org.slf4j.LoggerFactory;
   private static final int __START_ISSET_ID = 2;
   private static final int __HIGHESTWRITEID_ISSET_ID = 3;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID};
+  private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID,_Fields.ERROR_MESSAGE};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -199,6 +204,8 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.HIGHEST_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("highestWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionInfoStruct.class, metaDataMap);
   }
@@ -253,6 +260,9 @@ import org.slf4j.LoggerFactory;
     }
     this.start = other.start;
     this.highestWriteId = other.highestWriteId;
+    if (other.isSetErrorMessage()) {
+      this.errorMessage = other.errorMessage;
+    }
   }
 
   public CompactionInfoStruct deepCopy() {
@@ -277,6 +287,7 @@ import org.slf4j.LoggerFactory;
     this.start = 0;
     setHighestWriteIdIsSet(false);
     this.highestWriteId = 0;
+    this.errorMessage = null;
   }
 
   public long getId() {
@@ -559,6 +570,29 @@ import org.slf4j.LoggerFactory;
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHESTWRITEID_ISSET_ID, value);
   }
 
+  public String getErrorMessage() {
+    return this.errorMessage;
+  }
+
+  public void setErrorMessage(String errorMessage) {
+    this.errorMessage = errorMessage;
+  }
+
+  public void unsetErrorMessage() {
+    this.errorMessage = null;
+  }
+
+  /** Returns true if field errorMessage is set (has been assigned a value) and false otherwise */
+  public boolean isSetErrorMessage() {
+    return this.errorMessage != null;
+  }
+
+  public void setErrorMessageIsSet(boolean value) {
+    if (!value) {
+      this.errorMessage = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case ID:
@@ -657,6 +691,14 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
+    case ERROR_MESSAGE:
+      if (value == null) {
+        unsetErrorMessage();
+      } else {
+        setErrorMessage((String)value);
+      }
+      break;
+
     }
   }
 
@@ -698,6 +740,9 @@ import org.slf4j.LoggerFactory;
     case HIGHEST_WRITE_ID:
       return getHighestWriteId();
 
+    case ERROR_MESSAGE:
+      return getErrorMessage();
+
     }
     throw new IllegalStateException();
   }
@@ -733,6 +778,8 @@ import org.slf4j.LoggerFactory;
       return isSetStart();
     case HIGHEST_WRITE_ID:
       return isSetHighestWriteId();
+    case ERROR_MESSAGE:
+      return isSetErrorMessage();
     }
     throw new IllegalStateException();
   }
@@ -858,6 +905,15 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
+    boolean this_present_errorMessage = true && this.isSetErrorMessage();
+    boolean that_present_errorMessage = true && that.isSetErrorMessage();
+    if (this_present_errorMessage || that_present_errorMessage) {
+      if (!(this_present_errorMessage && that_present_errorMessage))
+        return false;
+      if (!this.errorMessage.equals(that.errorMessage))
+        return false;
+    }
+
     return true;
   }
 
@@ -925,6 +981,11 @@ import org.slf4j.LoggerFactory;
     if (present_highestWriteId)
       list.add(highestWriteId);
 
+    boolean present_errorMessage = true && (isSetErrorMessage());
+    list.add(present_errorMessage);
+    if (present_errorMessage)
+      list.add(errorMessage);
+
     return list.hashCode();
   }
 
@@ -1056,6 +1117,16 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetErrorMessage()).compareTo(other.isSetErrorMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetErrorMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorMessage, other.errorMessage);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -1171,6 +1242,16 @@ import org.slf4j.LoggerFactory;
       sb.append(this.highestWriteId);
       first = false;
     }
+    if (isSetErrorMessage()) {
+      if (!first) sb.append(", ");
+      sb.append("errorMessage:");
+      if (this.errorMessage == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.errorMessage);
+      }
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -1328,6 +1409,14 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 13: // ERROR_MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.errorMessage = iprot.readString();
+              struct.setErrorMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -1409,6 +1498,13 @@ import org.slf4j.LoggerFactory;
         oprot.writeI64(struct.highestWriteId);
         oprot.writeFieldEnd();
       }
+      if (struct.errorMessage != null) {
+        if (struct.isSetErrorMessage()) {
+          oprot.writeFieldBegin(ERROR_MESSAGE_FIELD_DESC);
+          oprot.writeString(struct.errorMessage);
+          oprot.writeFieldEnd();
+        }
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -1455,7 +1551,10 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetHighestWriteId()) {
         optionals.set(7);
       }
-      oprot.writeBitSet(optionals, 8);
+      if (struct.isSetErrorMessage()) {
+        optionals.set(8);
+      }
+      oprot.writeBitSet(optionals, 9);
       if (struct.isSetPartitionname()) {
         oprot.writeString(struct.partitionname);
       }
@@ -1480,6 +1579,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetHighestWriteId()) {
         oprot.writeI64(struct.highestWriteId);
       }
+      if (struct.isSetErrorMessage()) {
+        oprot.writeString(struct.errorMessage);
+      }
     }
 
     @Override
@@ -1493,7 +1595,7 @@ import org.slf4j.LoggerFactory;
       struct.setTablenameIsSet(true);
       struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32());
       struct.setTypeIsSet(true);
-      BitSet incoming = iprot.readBitSet(8);
+      BitSet incoming = iprot.readBitSet(9);
       if (incoming.get(0)) {
         struct.partitionname = iprot.readString();
         struct.setPartitionnameIsSet(true);
@@ -1526,6 +1628,10 @@ import org.slf4j.LoggerFactory;
         struct.highestWriteId = iprot.readI64();
         struct.setHighestWriteIdIsSet(true);
       }
+      if (incoming.get(8)) {
+        struct.errorMessage = iprot.readString();
+        struct.setErrorMessageIsSet(true);
+      }
     }
   }
 
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
index 8a5682a..ea5c47e 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
@@ -51,6 +51,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField END_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("endTime", org.apache.thrift.protocol.TType.I64, (short)11);
   private static final org.apache.thrift.protocol.TField HADOOP_JOB_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("hadoopJobId", org.apache.thrift.protocol.TType.STRING, (short)12);
   private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short)13);
+  private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)14);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -71,6 +72,7 @@ import org.slf4j.LoggerFactory;
   private long endTime; // optional
   private String hadoopJobId; // optional
   private long id; // optional
+  private String errorMessage; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -90,7 +92,8 @@ import org.slf4j.LoggerFactory;
     META_INFO((short)10, "metaInfo"),
     END_TIME((short)11, "endTime"),
     HADOOP_JOB_ID((short)12, "hadoopJobId"),
-    ID((short)13, "id");
+    ID((short)13, "id"),
+    ERROR_MESSAGE((short)14, "errorMessage");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -131,6 +134,8 @@ import org.slf4j.LoggerFactory;
           return HADOOP_JOB_ID;
         case 13: // ID
           return ID;
+        case 14: // ERROR_MESSAGE
+          return ERROR_MESSAGE;
         default:
           return null;
       }
@@ -176,7 +181,7 @@ import org.slf4j.LoggerFactory;
   private static final int __ENDTIME_ISSET_ID = 2;
   private static final int __ID_ISSET_ID = 3;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID};
+  private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID,_Fields.ERROR_MESSAGE};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -206,6 +211,8 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowCompactResponseElement.class, metaDataMap);
   }
@@ -264,6 +271,9 @@ import org.slf4j.LoggerFactory;
       this.hadoopJobId = other.hadoopJobId;
     }
     this.id = other.id;
+    if (other.isSetErrorMessage()) {
+      this.errorMessage = other.errorMessage;
+    }
   }
 
   public ShowCompactResponseElement deepCopy() {
@@ -290,6 +300,7 @@ import org.slf4j.LoggerFactory;
 
     setIdIsSet(false);
     this.id = 0;
+    this.errorMessage = null;
   }
 
   public String getDbname() {
@@ -595,6 +606,29 @@ import org.slf4j.LoggerFactory;
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
   }
 
+  public String getErrorMessage() {
+    return this.errorMessage;
+  }
+
+  public void setErrorMessage(String errorMessage) {
+    this.errorMessage = errorMessage;
+  }
+
+  public void unsetErrorMessage() {
+    this.errorMessage = null;
+  }
+
+  /** Returns true if field errorMessage is set (has been assigned a value) and false otherwise */
+  public boolean isSetErrorMessage() {
+    return this.errorMessage != null;
+  }
+
+  public void setErrorMessageIsSet(boolean value) {
+    if (!value) {
+      this.errorMessage = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case DBNAME:
@@ -701,6 +735,14 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
+    case ERROR_MESSAGE:
+      if (value == null) {
+        unsetErrorMessage();
+      } else {
+        setErrorMessage((String)value);
+      }
+      break;
+
     }
   }
 
@@ -745,6 +787,9 @@ import org.slf4j.LoggerFactory;
     case ID:
       return getId();
 
+    case ERROR_MESSAGE:
+      return getErrorMessage();
+
     }
     throw new IllegalStateException();
   }
@@ -782,6 +827,8 @@ import org.slf4j.LoggerFactory;
       return isSetHadoopJobId();
     case ID:
       return isSetId();
+    case ERROR_MESSAGE:
+      return isSetErrorMessage();
     }
     throw new IllegalStateException();
   }
@@ -916,6 +963,15 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
+    boolean this_present_errorMessage = true && this.isSetErrorMessage();
+    boolean that_present_errorMessage = true && that.isSetErrorMessage();
+    if (this_present_errorMessage || that_present_errorMessage) {
+      if (!(this_present_errorMessage && that_present_errorMessage))
+        return false;
+      if (!this.errorMessage.equals(that.errorMessage))
+        return false;
+    }
+
     return true;
   }
 
@@ -988,6 +1044,11 @@ import org.slf4j.LoggerFactory;
     if (present_id)
       list.add(id);
 
+    boolean present_errorMessage = true && (isSetErrorMessage());
+    list.add(present_errorMessage);
+    if (present_errorMessage)
+      list.add(errorMessage);
+
     return list.hashCode();
   }
 
@@ -1129,6 +1190,16 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetErrorMessage()).compareTo(other.isSetErrorMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetErrorMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorMessage, other.errorMessage);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -1254,6 +1325,16 @@ import org.slf4j.LoggerFactory;
       sb.append(this.id);
       first = false;
     }
+    if (isSetErrorMessage()) {
+      if (!first) sb.append(", ");
+      sb.append("errorMessage:");
+      if (this.errorMessage == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.errorMessage);
+      }
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -1419,6 +1500,14 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 14: // ERROR_MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.errorMessage = iprot.readString();
+              struct.setErrorMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -1507,6 +1596,13 @@ import org.slf4j.LoggerFactory;
         oprot.writeI64(struct.id);
         oprot.writeFieldEnd();
       }
+      if (struct.errorMessage != null) {
+        if (struct.isSetErrorMessage()) {
+          oprot.writeFieldBegin(ERROR_MESSAGE_FIELD_DESC);
+          oprot.writeString(struct.errorMessage);
+          oprot.writeFieldEnd();
+        }
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -1556,7 +1652,10 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetId()) {
         optionals.set(8);
       }
-      oprot.writeBitSet(optionals, 9);
+      if (struct.isSetErrorMessage()) {
+        optionals.set(9);
+      }
+      oprot.writeBitSet(optionals, 10);
       if (struct.isSetPartitionname()) {
         oprot.writeString(struct.partitionname);
       }
@@ -1584,6 +1683,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetId()) {
         oprot.writeI64(struct.id);
       }
+      if (struct.isSetErrorMessage()) {
+        oprot.writeString(struct.errorMessage);
+      }
     }
 
     @Override
@@ -1597,7 +1699,7 @@ import org.slf4j.LoggerFactory;
       struct.setTypeIsSet(true);
       struct.state = iprot.readString();
       struct.setStateIsSet(true);
-      BitSet incoming = iprot.readBitSet(9);
+      BitSet incoming = iprot.readBitSet(10);
       if (incoming.get(0)) {
         struct.partitionname = iprot.readString();
         struct.setPartitionnameIsSet(true);
@@ -1634,6 +1736,10 @@ import org.slf4j.LoggerFactory;
         struct.id = iprot.readI64();
         struct.setIdIsSet(true);
       }
+      if (incoming.get(9)) {
+        struct.errorMessage = iprot.readString();
+        struct.setErrorMessageIsSet(true);
+      }
     }
   }
 
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index e8556dc..a5bbc36 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -22369,6 +22369,10 @@ class CompactionInfoStruct {
    * @var int
    */
   public $highestWriteId = null;
+  /**
+   * @var string
+   */
+  public $errorMessage = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -22421,6 +22425,10 @@ class CompactionInfoStruct {
           'var' => 'highestWriteId',
           'type' => TType::I64,
           ),
+        13 => array(
+          'var' => 'errorMessage',
+          'type' => TType::STRING,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -22460,6 +22468,9 @@ class CompactionInfoStruct {
       if (isset($vals['highestWriteId'])) {
         $this->highestWriteId = $vals['highestWriteId'];
       }
+      if (isset($vals['errorMessage'])) {
+        $this->errorMessage = $vals['errorMessage'];
+      }
     }
   }
 
@@ -22566,6 +22577,13 @@ class CompactionInfoStruct {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 13:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->errorMessage);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -22639,6 +22657,11 @@ class CompactionInfoStruct {
       $xfer += $output->writeI64($this->highestWriteId);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->errorMessage !== null) {
+      $xfer += $output->writeFieldBegin('errorMessage', TType::STRING, 13);
+      $xfer += $output->writeString($this->errorMessage);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
@@ -22952,6 +22975,10 @@ class ShowCompactResponseElement {
    * @var int
    */
   public $id = null;
+  /**
+   * @var string
+   */
+  public $errorMessage = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -23008,6 +23035,10 @@ class ShowCompactResponseElement {
           'var' => 'id',
           'type' => TType::I64,
           ),
+        14 => array(
+          'var' => 'errorMessage',
+          'type' => TType::STRING,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -23050,6 +23081,9 @@ class ShowCompactResponseElement {
       if (isset($vals['id'])) {
         $this->id = $vals['id'];
       }
+      if (isset($vals['errorMessage'])) {
+        $this->errorMessage = $vals['errorMessage'];
+      }
     }
   }
 
@@ -23163,6 +23197,13 @@ class ShowCompactResponseElement {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 14:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->errorMessage);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -23241,6 +23282,11 @@ class ShowCompactResponseElement {
       $xfer += $output->writeI64($this->id);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->errorMessage !== null) {
+      $xfer += $output->writeFieldBegin('errorMessage', TType::STRING, 14);
+      $xfer += $output->writeString($this->errorMessage);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index b05e61e..2414194 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -15551,6 +15551,7 @@ class CompactionInfoStruct:
    - workerId
    - start
    - highestWriteId
+   - errorMessage
   """
 
   thrift_spec = (
@@ -15567,9 +15568,10 @@ class CompactionInfoStruct:
     (10, TType.STRING, 'workerId', None, None, ), # 10
     (11, TType.I64, 'start', None, None, ), # 11
     (12, TType.I64, 'highestWriteId', None, None, ), # 12
+    (13, TType.STRING, 'errorMessage', None, None, ), # 13
   )
 
-  def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, toomanyaborts=None, state=None, workerId=None, start=None, highestWriteId=None,):
+  def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, toomanyaborts=None, state=None, workerId=None, start=None, highestWriteId=None, errorMessage=None,):
     self.id = id
     self.dbname = dbname
     self.tablename = tablename
@@ -15582,6 +15584,7 @@ class CompactionInfoStruct:
     self.workerId = workerId
     self.start = start
     self.highestWriteId = highestWriteId
+    self.errorMessage = errorMessage
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -15652,6 +15655,11 @@ class CompactionInfoStruct:
           self.highestWriteId = iprot.readI64()
         else:
           iprot.skip(ftype)
+      elif fid == 13:
+        if ftype == TType.STRING:
+          self.errorMessage = iprot.readString()
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -15710,6 +15718,10 @@ class CompactionInfoStruct:
       oprot.writeFieldBegin('highestWriteId', TType.I64, 12)
       oprot.writeI64(self.highestWriteId)
       oprot.writeFieldEnd()
+    if self.errorMessage is not None:
+      oprot.writeFieldBegin('errorMessage', TType.STRING, 13)
+      oprot.writeString(self.errorMessage)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -15739,6 +15751,7 @@ class CompactionInfoStruct:
     value = (value * 31) ^ hash(self.workerId)
     value = (value * 31) ^ hash(self.start)
     value = (value * 31) ^ hash(self.highestWriteId)
+    value = (value * 31) ^ hash(self.errorMessage)
     return value
 
   def __repr__(self):
@@ -15977,6 +15990,7 @@ class ShowCompactResponseElement:
    - endTime
    - hadoopJobId
    - id
+   - errorMessage
   """
 
   thrift_spec = (
@@ -15994,9 +16008,10 @@ class ShowCompactResponseElement:
     (11, TType.I64, 'endTime', None, None, ), # 11
     (12, TType.STRING, 'hadoopJobId', None, "None", ), # 12
     (13, TType.I64, 'id', None, None, ), # 13
+    (14, TType.STRING, 'errorMessage', None, None, ), # 14
   )
 
-  def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, state=None, workerid=None, start=None, runAs=None, hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId=thrift_spec[12][4], id=None,):
+  def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, state=None, workerid=None, start=None, runAs=None, hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId=thrift_spec[12][4], id=None, errorMessage=None,):
     self.dbname = dbname
     self.tablename = tablename
     self.partitionname = partitionname
@@ -16010,6 +16025,7 @@ class ShowCompactResponseElement:
     self.endTime = endTime
     self.hadoopJobId = hadoopJobId
     self.id = id
+    self.errorMessage = errorMessage
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -16085,6 +16101,11 @@ class ShowCompactResponseElement:
           self.id = iprot.readI64()
         else:
           iprot.skip(ftype)
+      elif fid == 14:
+        if ftype == TType.STRING:
+          self.errorMessage = iprot.readString()
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -16147,6 +16168,10 @@ class ShowCompactResponseElement:
       oprot.writeFieldBegin('id', TType.I64, 13)
       oprot.writeI64(self.id)
       oprot.writeFieldEnd()
+    if self.errorMessage is not None:
+      oprot.writeFieldBegin('errorMessage', TType.STRING, 14)
+      oprot.writeString(self.errorMessage)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -16177,6 +16202,7 @@ class ShowCompactResponseElement:
     value = (value * 31) ^ hash(self.endTime)
     value = (value * 31) ^ hash(self.hadoopJobId)
     value = (value * 31) ^ hash(self.id)
+    value = (value * 31) ^ hash(self.errorMessage)
     return value
 
   def __repr__(self):
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 868cf69..86c20bc 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -3466,6 +3466,7 @@ class CompactionInfoStruct
   WORKERID = 10
   START = 11
   HIGHESTWRITEID = 12
+  ERRORMESSAGE = 13
 
   FIELDS = {
     ID => {:type => ::Thrift::Types::I64, :name => 'id'},
@@ -3479,7 +3480,8 @@ class CompactionInfoStruct
     STATE => {:type => ::Thrift::Types::STRING, :name => 'state', :optional => true},
     WORKERID => {:type => ::Thrift::Types::STRING, :name => 'workerId', :optional => true},
     START => {:type => ::Thrift::Types::I64, :name => 'start', :optional => true},
-    HIGHESTWRITEID => {:type => ::Thrift::Types::I64, :name => 'highestWriteId', :optional => true}
+    HIGHESTWRITEID => {:type => ::Thrift::Types::I64, :name => 'highestWriteId', :optional => true},
+    ERRORMESSAGE => {:type => ::Thrift::Types::STRING, :name => 'errorMessage', :optional => true}
   }
 
   def struct_fields; FIELDS; end
@@ -3566,6 +3568,7 @@ class ShowCompactResponseElement
   ENDTIME = 11
   HADOOPJOBID = 12
   ID = 13
+  ERRORMESSAGE = 14
 
   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
@@ -3580,7 +3583,8 @@ class ShowCompactResponseElement
     METAINFO => {:type => ::Thrift::Types::STRING, :name => 'metaInfo', :optional => true},
     ENDTIME => {:type => ::Thrift::Types::I64, :name => 'endTime', :optional => true},
     HADOOPJOBID => {:type => ::Thrift::Types::STRING, :name => 'hadoopJobId', :default => %q"None", :optional => true},
-    ID => {:type => ::Thrift::Types::I64, :name => 'id', :optional => true}
+    ID => {:type => ::Thrift::Types::I64, :name => 'id', :optional => true},
+    ERRORMESSAGE => {:type => ::Thrift::Types::STRING, :name => 'errorMessage', :optional => true}
   }
 
   def struct_fields; FIELDS; end
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 61a94fe..06fd949 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -1155,6 +1155,7 @@ struct CompactionInfoStruct {
     10: optional string workerId
     11: optional i64 start
     12: optional i64 highestWriteId
+    13: optional string errorMessage
 }
 
 struct OptionalCompactionInfoStruct {
@@ -1184,6 +1185,7 @@ struct ShowCompactResponseElement {
     11: optional i64 endTime,
     12: optional string hadoopJobId = "None",
     13: optional i64 id,
+    14: optional string errorMessage,
 }
 
 struct ShowCompactResponse {
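
Because errorMessage is added as an optional field under a previously unused id (13 on CompactionInfoStruct, 14 on ShowCompactResponseElement), the change is wire-compatible in both directions: an old reader skips the unknown field and a new reader tolerates its absence, which is exactly what the generated readBitSet/skip code above implements. A minimal sketch against the generated Java API (all values invented):

    ShowCompactResponseElement e = new ShowCompactResponseElement();
    e.setDbname("default");
    e.setTablename("t");
    e.setState("failed");
    e.setErrorMessage("Dummy error");  // new optional field 14
    assert e.isSetErrorMessage();      // optional fields report false until assigned
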
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
index ba45f39..bf91ae7 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
@@ -59,6 +59,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
   public long highestWriteId;
   byte[] metaInfo;
   String hadoopJobId;
+  public String errorMessage;
 
   private String fullPartitionName = null;
   private String fullTableName = null;
@@ -117,7 +118,8 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
       "properties:" + properties + "," +
       "runAs:" + runAs + "," +
       "tooManyAborts:" + tooManyAborts + "," +
-      "highestWriteId:" + highestWriteId;
+      "highestWriteId:" + highestWriteId + "," +
+      "errorMessage:" + errorMessage;
   }
 
   @Override
@@ -159,6 +161,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
     fullCi.highestWriteId = rs.getLong(11);
     fullCi.metaInfo = rs.getBytes(12);
     fullCi.hadoopJobId = rs.getString(13);
+    fullCi.errorMessage = rs.getString(14);
     return fullCi;
   }
   static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionInfo ci, long endTime) throws SQLException {
@@ -176,6 +179,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
     pStmt.setLong(12, ci.highestWriteId);
     pStmt.setBytes(13, ci.metaInfo);
     pStmt.setString(14, ci.hadoopJobId);
+    pStmt.setString(15, ci.errorMessage);
   }
 
   public static CompactionInfo compactionStructToInfo(CompactionInfoStruct cr) {
@@ -201,6 +205,9 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
     if (cr.isSetHighestWriteId()) {
       ci.highestWriteId = cr.getHighestWriteId();
     }
+    if (cr.isSetErrorMessage()) {
+      ci.errorMessage = cr.getErrorMessage();
+    }
     return ci;
   }
 
@@ -217,6 +224,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
     cr.setState(Character.toString(ci.state));
     cr.setWorkerId(ci.workerId);
     cr.setHighestWriteId(ci.highestWriteId);
+    cr.setErrorMessage(ci.errorMessage);
     return cr;
   }
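
CompactionInfo carries the message as a plain public field, and the two conversion helpers shuttle it to and from the Thrift struct, so the value survives the metastore client/server hop. A hedged round-trip sketch using the methods touched in this hunk (the four-argument constructor is assumed from the rest of the class, not shown in this diff):

    CompactionInfo ci = new CompactionInfo("foo", "bar", "ds=today", CompactionType.MINOR);
    ci.errorMessage = "Dummy error";
    CompactionInfoStruct s = CompactionInfo.compactionInfoToStruct(ci);
    assert "Dummy error".equals(s.getErrorMessage());
    assert "Dummy error".equals(CompactionInfo.compactionStructToInfo(s).errorMessage);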
 
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index aded6f5..bae23f7 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -369,7 +369,9 @@ class CompactionTxnHandler extends TxnHandler {
       ResultSet rs = null;
       try {
         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
-        pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?");
+        pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, "
+            + "CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, "
+            + "CQ_HADOOP_JOB_ID, CQ_ERROR_MESSAGE from COMPACTION_QUEUE WHERE CQ_ID = ?");
         pStmt.setLong(1, info.id);
         rs = pStmt.executeQuery();
         if(rs.next()) {
@@ -389,7 +391,10 @@ class CompactionTxnHandler extends TxnHandler {
           LOG.debug("Going to rollback");
           dbConn.rollback();
         }
-        pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
+        pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, "
+            + "CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, "
+            + "CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID, CC_ERROR_MESSAGE) "
+            + "VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?)");
         info.state = SUCCEEDED_STATE;
         CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn));
         updCount = pStmt.executeUpdate();
@@ -1051,10 +1056,15 @@ class CompactionTxnHandler extends TxnHandler {
       Statement stmt = null;
       PreparedStatement pStmt = null;
       ResultSet rs = null;
+      // The failure reason travels inside the CompactionInfo. Save it now,
+      // because ci is repopulated from the COMPACTION_QUEUE row below, which
+      // would otherwise drop a message that exists only in memory.
+      String errorMessage = ci.errorMessage;
       try {
         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
         stmt = dbConn.createStatement();
-        pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?");
+        pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, "
+            + "CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, "
+            + "CQ_HADOOP_JOB_ID, CQ_ERROR_MESSAGE from COMPACTION_QUEUE WHERE CQ_ID = ?");
         pStmt.setLong(1, ci.id);
         rs = pStmt.executeQuery();
         if(rs.next()) {
@@ -1088,7 +1098,13 @@ class CompactionTxnHandler extends TxnHandler {
         close(rs, stmt, null);
         closeStmt(pStmt);
 
-        pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
+        pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, "
+            + "CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, "
+            + "CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID, CC_ERROR_MESSAGE) "
+            + "VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?)");
+        if (errorMessage != null) {
+          ci.errorMessage = errorMessage;
+        }
         CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn));
         int updCount = pStmt.executeUpdate();
         LOG.debug("Going to commit");
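The save/restore above is needed because markFailed() re-reads ci from the
COMPACTION_QUEUE row it is retiring. On the producer side, the pattern is
simply to attach the exception text before the call; a hedged sketch of a
call site (runCompactionJob is a made-up stand-in; the real ones are the
Worker and Initiator changes elsewhere in this commit):

    try {
      runCompactionJob(ci);              // hypothetical stand-in for the actual work
    } catch (Exception ex) {
      ci.errorMessage = ex.getMessage(); // travels with the CompactionInfo
      txnHandler.markFailed(ci);         // lands in COMPLETED_COMPACTIONS.CC_ERROR_MESSAGE
    }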
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index da5dd61..7f1c2f8 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -153,7 +153,8 @@ public final class TxnDbUtil {
           " CQ_RUN_AS varchar(128)," +
           " CQ_HIGHEST_WRITE_ID bigint," +
           " CQ_META_INFO varchar(2048) for bit data," +
-          " CQ_HADOOP_JOB_ID varchar(32))");
+          " CQ_HADOOP_JOB_ID varchar(32)," +
+          " CQ_ERROR_MESSAGE clob)");
 
       stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
       stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
@@ -172,7 +173,8 @@ public final class TxnDbUtil {
           " CC_RUN_AS varchar(128)," +
           " CC_HIGHEST_WRITE_ID bigint," +
           " CC_META_INFO varchar(2048) for bit data," +
-          " CC_HADOOP_JOB_ID varchar(32))");
+          " CC_HADOOP_JOB_ID varchar(32)," +
+          " CC_ERROR_MESSAGE clob)");
 
       stmt.execute("CREATE TABLE AUX_TABLE (" +
         " MT_KEY1 varchar(128) NOT NULL," +
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index c2c97d9..f53aebe 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -3191,9 +3191,11 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
         stmt = dbConn.createStatement();
         String s = "SELECT \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", \"CQ_STATE\", \"CQ_TYPE\", \"CQ_WORKER_ID\", " +
           //-1 because 'null' literal doesn't work for all DBs...
-          "\"CQ_START\", -1 \"CC_END\", \"CQ_RUN_AS\", \"CQ_HADOOP_JOB_ID\", \"CQ_ID\" FROM \"COMPACTION_QUEUE\" UNION ALL " +
+          "\"CQ_START\", -1 \"CC_END\", \"CQ_RUN_AS\", \"CQ_HADOOP_JOB_ID\", \"CQ_ID\", \"CQ_ERROR_MESSAGE\" " +
+          "FROM \"COMPACTION_QUEUE\" UNION ALL " +
           "SELECT \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", \"CC_WORKER_ID\", " +
-          "\"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\" FROM \"COMPLETED_COMPACTIONS\""; //todo: sort by cq_id?
+          "\"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\", \"CC_ERROR_MESSAGE\"" +
+          " FROM \"COMPLETED_COMPACTIONS\""; //todo: sort by cq_id?
         //what I want is order by cc_end desc, cc_start asc (but derby has a bug https://issues.apache.org/jira/browse/DERBY-6013)
         //to sort so that currently running jobs are at the end of the list (bottom of screen)
         //and currently running ones are sorted by start time
@@ -3224,6 +3226,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
           e.setRunAs(rs.getString(9));
           e.setHadoopJobId(rs.getString(10));
           e.setId(rs.getLong(11));
+          e.setErrorMessage(rs.getString(12));
           response.addToCompacts(e);
         }
         LOG.debug("Going to rollback");
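End to end, the reason recorded at failure time now comes back from SHOW
COMPACTIONS. A minimal sketch of pulling failed entries through the TxnStore
API, assuming the generated accessors and that "failed" matches the response
string the metastore uses for failed compactions:

    ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
    for (ShowCompactResponseElement e : resp.getCompacts()) {
      if ("failed".equals(e.getState()) && e.isSetErrorMessage()) {
        System.out.println(e.getDbname() + "." + e.getTablename()
            + ": " + e.getErrorMessage());
      }
    }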
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 6710271..3be5707 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -587,7 +587,8 @@ CREATE TABLE COMPACTION_QUEUE (
   CQ_RUN_AS varchar(128),
   CQ_HIGHEST_WRITE_ID bigint,
   CQ_META_INFO varchar(2048) for bit data,
-  CQ_HADOOP_JOB_ID varchar(32)
+  CQ_HADOOP_JOB_ID varchar(32),
+  CQ_ERROR_MESSAGE clob
 );
 
 CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
@@ -609,7 +610,8 @@ CREATE TABLE COMPLETED_COMPACTIONS (
   CC_RUN_AS varchar(128),
   CC_HIGHEST_WRITE_ID bigint,
   CC_META_INFO varchar(2048) for bit data,
-  CC_HADOOP_JOB_ID varchar(32)
+  CC_HADOOP_JOB_ID varchar(32),
+  CC_ERROR_MESSAGE clob
 );
 
 CREATE TABLE AUX_TABLE (
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
index ae0a325..a7d8da4 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
@@ -48,6 +48,11 @@ CREATE INDEX LASTUPDATETIMEINDEX ON APP.SCHEDULED_EXECUTIONS (LAST_UPDATE_TIME);
 CREATE INDEX SCHEDULED_EXECUTIONS_SCHQID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID);
 CREATE UNIQUE INDEX SCHEDULED_EXECUTIONS_UNIQUE_ID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID);
 
+-- HIVE-22729
+ALTER TABLE COMPACTION_QUEUE ADD CQ_ERROR_MESSAGE clob;
+ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE clob;
+
 -- This needs to be the last thing done.  Insert any changes above this line.
 UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 
+
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 221d4f1..29b7b3f 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -1012,6 +1012,7 @@ CREATE TABLE COMPACTION_QUEUE(
     CQ_HIGHEST_WRITE_ID bigint NULL,
     CQ_META_INFO varbinary(2048) NULL,
 	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
+	CQ_ERROR_MESSAGE varchar(max) NULL,
 PRIMARY KEY CLUSTERED
 (
 	CQ_ID ASC
@@ -1033,6 +1034,7 @@ CREATE TABLE COMPLETED_COMPACTIONS (
     CC_HIGHEST_WRITE_ID bigint NULL,
     CC_META_INFO varbinary(2048) NULL,
 	CC_HADOOP_JOB_ID nvarchar(128) NULL,
+	CC_ERROR_MESSAGE varchar(max) NULL,
 PRIMARY KEY CLUSTERED
 (
 	CC_ID ASC
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
index bc98d5f..72733c9 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
@@ -22,6 +22,10 @@ UPDATE TAB_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL;
 ALTER TABLE PART_COL_STATS ADD ENGINE nvarchar(128);
 UPDATE PART_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL;
 
+-- HIVE-22729
+ALTER TABLE COMPACTION_QUEUE ADD CQ_ERROR_MESSAGE varchar(max) NULL;
+ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE varchar(max) NULL;
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index dd761a6..69e2cef 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -1070,7 +1070,8 @@ CREATE TABLE COMPACTION_QUEUE (
   CQ_RUN_AS varchar(128),
   CQ_HIGHEST_WRITE_ID bigint,
   CQ_META_INFO varbinary(2048),
-  CQ_HADOOP_JOB_ID varchar(32)
+  CQ_HADOOP_JOB_ID varchar(32),
+  CQ_ERROR_MESSAGE mediumtext
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
 CREATE TABLE COMPLETED_COMPACTIONS (
@@ -1087,7 +1088,8 @@ CREATE TABLE COMPLETED_COMPACTIONS (
   CC_RUN_AS varchar(128),
   CC_HIGHEST_WRITE_ID bigint,
   CC_META_INFO varbinary(2048),
-  CC_HADOOP_JOB_ID varchar(32)
+  CC_HADOOP_JOB_ID varchar(32),
+  CC_ERROR_MESSAGE mediumtext
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
 CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
index 6a040a6..c81d08a 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
@@ -52,7 +52,10 @@ CREATE INDEX IDX_SCHEDULED_EXECUTIONS_LAST_UPDATE_TIME ON SCHEDULED_EXECUTIONS (
 CREATE INDEX IDX_SCHEDULED_EXECUTIONS_SCHEDULED_QUERY_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID);
 CREATE UNIQUE INDEX UNIQUE_SCHEDULED_EXECUTIONS_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID);
 
+-- HIVE-22729
+ALTER TABLE COMPACTION_QUEUE ADD CQ_ERROR_MESSAGE mediumtext;
+ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE mediumtext;
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
-
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index f5ec1ba..cb95a42 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -1051,7 +1051,8 @@ CREATE TABLE COMPACTION_QUEUE (
   CQ_RUN_AS varchar(128),
   CQ_HIGHEST_WRITE_ID NUMBER(19),
   CQ_META_INFO BLOB,
-  CQ_HADOOP_JOB_ID varchar2(32)
+  CQ_HADOOP_JOB_ID varchar2(32),
+  CQ_ERROR_MESSAGE CLOB
 ) ROWDEPENDENCIES;
 
 CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
@@ -1073,7 +1074,8 @@ CREATE TABLE COMPLETED_COMPACTIONS (
   CC_RUN_AS varchar(128),
   CC_HIGHEST_WRITE_ID NUMBER(19),
   CC_META_INFO BLOB,
-  CC_HADOOP_JOB_ID varchar2(32)
+  CC_HADOOP_JOB_ID varchar2(32),
+  CC_ERROR_MESSAGE CLOB
 ) ROWDEPENDENCIES;
 
 CREATE TABLE AUX_TABLE (
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
index c7738be..65057be 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
@@ -52,7 +52,10 @@ CREATE TABLE "SCHEDULED_EXECUTIONS" (
 CREATE INDEX IDX_SCHEDULED_EX_LAST_UPDATE ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME");
 CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID");
 
+-- HIVE-22729
+ALTER TABLE COMPACTION_QUEUE ADD CQ_ERROR_MESSAGE CLOB;
+ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE CLOB;
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual;
-
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index 455f98b..0fcb88a 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -1737,7 +1737,8 @@ CREATE TABLE "COMPACTION_QUEUE" (
   "CQ_RUN_AS" varchar(128),
   "CQ_HIGHEST_WRITE_ID" bigint,
   "CQ_META_INFO" bytea,
-  "CQ_HADOOP_JOB_ID" varchar(32)
+  "CQ_HADOOP_JOB_ID" varchar(32),
+  "CQ_ERROR_MESSAGE" text
 );
 
 CREATE TABLE "NEXT_COMPACTION_QUEUE_ID" (
@@ -1759,7 +1760,8 @@ CREATE TABLE "COMPLETED_COMPACTIONS" (
   "CC_RUN_AS" varchar(128),
   "CC_HIGHEST_WRITE_ID" bigint,
   "CC_META_INFO" bytea,
-  "CC_HADOOP_JOB_ID" varchar(32)
+  "CC_HADOOP_JOB_ID" varchar(32),
+  "CC_ERROR_MESSAGE" text
 );
 
 CREATE TABLE "AUX_TABLE" (
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
index 5c39b0d..2347c69 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
@@ -183,6 +183,9 @@ ALTER TABLE "WRITE_SET" RENAME COLUMN ws_txnid TO "WS_TXNID";
 ALTER TABLE "WRITE_SET" RENAME COLUMN ws_commit_id TO "WS_COMMIT_ID";
 ALTER TABLE "WRITE_SET" RENAME COLUMN ws_operation_type TO "WS_OPERATION_TYPE";
 
+-- HIVE-22729
+ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_ERROR_MESSAGE" text;
+ALTER TABLE "COMPLETED_COMPACTIONS" ADD "CC_ERROR_MESSAGE" text;
 
 -- These lines need to be last. Insert any changes above.
 UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
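
After running one of the upgrade scripts above, the new column can be
sanity-checked directly against the backing RDBMS. A hedged JDBC sketch,
where 'f' is the metastore's marker for the failed compaction state, the
connection settings are placeholders, and identifier quoting is
backend-specific (the Postgres schema quotes its names):

    import java.sql.*;

    try (Connection conn = DriverManager.getConnection(jdbcUrl, user, pass);
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT CC_DATABASE, CC_TABLE, CC_ERROR_MESSAGE " +
             "FROM COMPLETED_COMPACTIONS WHERE CC_STATE = 'f'")) {
      while (rs.next()) {
        System.out.println(rs.getString(1) + "." + rs.getString(2)
            + ": " + rs.getString(3));
      }
    }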