Posted to commits@hive.apache.org by nz...@apache.org on 2011/03/09 08:57:50 UTC

svn commit: r1079690 - /hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java

Author: nzhang
Date: Wed Mar  9 07:57:50 2011
New Revision: 1079690

URL: http://svn.apache.org/viewvc?rev=1079690&view=rev
Log:
HIVE-2034. Backport HIVE-1991 after it was overridden by HIVE-1950 (Siying Dong via Ning Zhang)
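
In short, the backported change makes HadoopJobExecHelper print a one-time "Hadoop job information" line (the number of mappers and reducers reported by the JobTracker) while it polls a running job, and record those counts in the Hive query history; a standalone sketch of this logic follows the diff below.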

Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java?rev=1079690&r1=1079689&r2=1079690&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java Wed Mar  9 07:57:50 2011
@@ -42,6 +42,7 @@ import org.apache.hadoop.mapred.JobClien
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hadoop.mapred.TaskCompletionEvent;
+import org.apache.hadoop.mapred.TaskReport;
 
 public class HadoopJobExecHelper {
   
@@ -207,6 +208,7 @@ public class HadoopJobExecHelper {
     StringBuilder errMsg = new StringBuilder();
     long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL);
     boolean initializing = true;
+    boolean initOutputPrinted = false;
     while (!rj.isComplete()) {
       try {
         Thread.sleep(pullInterval);
@@ -222,6 +224,41 @@ public class HadoopJobExecHelper {
         initializing = false;
       }
 
+      if (!initOutputPrinted) {
+        SessionState ss = SessionState.get();
+
+        String logMapper;
+        String logReducer;
+
+        TaskReport[] mappers = jc.getMapTaskReports(rj.getJobID());
+        if (mappers == null) {
+          logMapper = "no information for number of mappers; ";
+        } else {
+          int numMap = mappers.length;
+          if (ss != null) {
+            ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
+              Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
+          }
+          logMapper = "number of mappers: " + numMap + "; ";
+        }
+
+        TaskReport[] reducers = jc.getReduceTaskReports(rj.getJobID());
+        if (reducers == null) {
+          logReducer = "no information for number of reducers. ";
+        } else {
+          int numReduce = reducers.length;
+          if (ss != null) {
+            ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
+              Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
+          }
+          logReducer = "number of reducers: " + numReduce;
+        }
+
+        console
+            .printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
+        initOutputPrinted = true;
+      }
+
       RunningJob newRj = jc.getJob(rj.getJobID());
       if (newRj == null) {
         // under exceptional load, hadoop may not be able to look up status
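
The new block boils down to the following standalone sketch (the class name JobInfoSketch and the method printJobInfo are illustrative, not part of the patch). It uses the same old-API calls the patch relies on, JobClient.getMapTaskReports and JobClient.getReduceTaskReports, and mirrors the patch's defensive null checks, since the JobTracker may not be able to supply task reports:

    import java.io.IOException;

    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.RunningJob;
    import org.apache.hadoop.mapred.TaskReport;

    public class JobInfoSketch {

      // Prints the map/reduce task counts for a running job, once.
      // Mirrors the patch above, minus the Hive history bookkeeping.
      static void printJobInfo(JobClient jc, RunningJob rj) throws IOException {
        // Both calls are checked for null, as in the patch, because the
        // JobTracker may have no task information for the job.
        TaskReport[] mappers = jc.getMapTaskReports(rj.getID());
        TaskReport[] reducers = jc.getReduceTaskReports(rj.getID());

        String logMapper = (mappers == null)
            ? "no information for number of mappers; "
            : "number of mappers: " + mappers.length + "; ";
        String logReducer = (reducers == null)
            ? "no information for number of reducers. "
            : "number of reducers: " + reducers.length;

        System.out.println("Hadoop job information for " + rj.getID()
            + ": " + logMapper + logReducer);
      }
    }

The initOutputPrinted flag in the patch is what keeps this to a single line of output: the enclosing while (!rj.isComplete()) loop wakes up every HIVECOUNTERSPULLINTERVAL milliseconds, and without the flag the same job information would be printed on every poll.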