Posted to commits@hive.apache.org by we...@apache.org on 2017/04/13 17:44:06 UTC

hive git commit: HIVE-16193 : Hive show compactions not reflecting the status of the application, ADDENDUM

Repository: hive
Updated Branches:
  refs/heads/master cbab5b29f -> ee8b55a4f


HIVE-16193 : Hive show compactions not reflecting the status of the application, ADDENDUM
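
The addendum threads the compaction job name through launchCompactionJob so that a
failed compactor MapReduce job is reported with the job name and the Hadoop JobId
that is also recorded via txnHandler.setHadoopJobId (see the last hunk below). A
minimal sketch of the resulting exception message, assuming a hypothetical job name
and JobId (neither value is taken from this commit):

    public class CompactorFailureMessageSketch {
      public static void main(String[] args) {
        // Hypothetical inputs: in CompactorMR the jobName is supplied by the caller
        // and the JobId comes from the submitted MapReduce job (rj.getID()).
        boolean isMajor = true;
        String jobName = "example-compactor-default.acid_table";
        String hadoopJobId = "job_1492102846000_0007";

        String message = (isMajor ? "Major" : "Minor")
            + " compactor job failed for " + jobName + "! Hadoop JobId: " + hadoopJobId;

        // Prints: Major compactor job failed for example-compactor-default.acid_table!
        // Hadoop JobId: job_1492102846000_0007
        System.out.println(message);
      }
    }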


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ee8b55a4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ee8b55a4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ee8b55a4

Branch: refs/heads/master
Commit: ee8b55a4fb13daaa1dbb17413db2492b6aad0ccd
Parents: cbab5b2
Author: Wei Zheng <we...@apache.org>
Authored: Thu Apr 13 10:44:26 2017 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Thu Apr 13 10:44:26 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/txn/compactor/CompactorMR.java    | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ee8b55a4/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index cca06a5..f83b6db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -233,7 +233,7 @@ public class CompactorMR {
         launchCompactionJob(jobMinorCompact,
           null, CompactionType.MINOR, null,
           parsedDeltas.subList(jobSubId * maxDeltastoHandle, (jobSubId + 1) * maxDeltastoHandle),
-          maxDeltastoHandle, -1, conf, txnHandler, ci.id);
+          maxDeltastoHandle, -1, conf, txnHandler, ci.id, jobName);
       }
       //now recompute state since we've done minor compactions and have different 'best' set of deltas
       dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, txns);
@@ -271,7 +271,7 @@ public class CompactorMR {
     }
 
     launchCompactionJob(job, baseDir, ci.type, dirsToSearch, dir.getCurrentDirectories(),
-      dir.getCurrentDirectories().size(), dir.getObsolete().size(), conf, txnHandler, ci.id);
+      dir.getCurrentDirectories().size(), dir.getObsolete().size(), conf, txnHandler, ci.id, jobName);
 
     su.gatherStats();
   }
@@ -279,7 +279,7 @@ public class CompactorMR {
                                    StringableList dirsToSearch,
                                    List<AcidUtils.ParsedDelta> parsedDeltas,
                                    int curDirNumber, int obsoleteDirNumber, HiveConf hiveConf,
-                                   TxnStore txnHandler, long id) throws IOException {
+                                   TxnStore txnHandler, long id, String jobName) throws IOException {
     job.setBoolean(IS_MAJOR, compactionType == CompactionType.MAJOR);
     if(dirsToSearch == null) {
       dirsToSearch = new StringableList();
@@ -314,7 +314,8 @@ public class CompactorMR {
     txnHandler.setHadoopJobId(rj.getID().toString(), id);
     rj.waitForCompletion();
     if (!rj.isSuccessful()) {
-      throw new IOException("Job failed!");
+      throw new IOException((compactionType == CompactionType.MAJOR ? "Major" : "Minor") +
+          " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID());
     }
   }
   /**
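
A note on the final hunk: the parentheses around the conditional expression matter,
because string concatenation binds more tightly than the ternary operator; without
them the MAJOR branch would reduce the whole message to just "Major". A small,
standalone sketch (independent of the Hive code) illustrating the difference:

    public class TernaryPrecedenceSketch {
      public static void main(String[] args) {
        boolean isMajor = true;
        String rest = " compactor job failed";

        // Parses as: isMajor ? "Major" : ("Minor" + rest)
        String unparenthesized = isMajor ? "Major" : "Minor" + rest;

        // Parses as: (isMajor ? "Major" : "Minor") + rest
        String parenthesized = (isMajor ? "Major" : "Minor") + rest;

        System.out.println(unparenthesized); // Major
        System.out.println(parenthesized);   // Major compactor job failed
      }
    }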